removing old/obsolete tests from repo

Change-Id: I5f6870ba9ec583fcdc560146c2289c9b0130ce66
diff --git a/Dockerfile.onos-builder b/Dockerfile.onos-builder
deleted file mode 100644
index 8a793c6..0000000
--- a/Dockerfile.onos-builder
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 8.11 == jessie
-FROM debian:8.11
-MAINTAINER A R Karthick <kramanar@ciena.com>
-
-# Add Java 8 repository
-ENV DEBIAN_FRONTEND noninteractive
-RUN echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
-    echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list && \
-    echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list && \
-    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
-
-# Set the environment variables
-ENV HOME /root
-ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
-ENV ONOS_ROOT /src/onos
-ENV KARAF_VERSION 3.0.8
-ENV KARAF_ROOT /root/onos/apache-karaf-3.0.8
-ENV KARAF_LOG /root/onos/apache-karaf-3.0.8/data/log/karaf.log
-ENV BUILD_NUMBER docker
-ENV PATH $PATH:$KARAF_ROOT/bin
-#Download and Build ONOS
-RUN     apt-get update && apt-get install -y python git less zip curl oracle-java8-installer oracle-java8-set-default
-RUN     mkdir -p /src
-WORKDIR /src
-CMD bash -c "git clone https://github.com/opennetworkinglab/onos.git && \
-        cd onos && \
-        tools/build/onos-buck build onos && \
-        cp buck-out/gen/tools/package/onos-package/onos.tar.gz /root/cord-tester && \
-        rm -rf /src/onos"
-
diff --git a/Dockerfile.quagga b/Dockerfile.quagga
deleted file mode 100644
index 2a410e4..0000000
--- a/Dockerfile.quagga
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-FROM ubuntu:14.04
-MAINTAINER chetan@ciena.com
-WORKDIR /root
-RUN useradd -M quagga
-RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
-RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
-RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev pkg-config protobuf-c-compiler
-RUN git clone git://git.savannah.nongnu.org/quagga.git quagga && \
-(cd quagga && git checkout quagga-1.0.20160315 && ./bootstrap.sh && \
-sed -i -r 's,htonl.*?\(INADDR_LOOPBACK\),inet_addr\("10.10.0.4"\),g' zebra/zebra_fpm.c && \
-./configure --enable-fpm --disable-doc --localstatedir=/var/run/quagga && make && make install)
-RUN ldconfig
-CMD [ "/bin/bash" ]
diff --git a/Dockerfile.tester b/Dockerfile.tester
deleted file mode 100644
index ec9c559..0000000
--- a/Dockerfile.tester
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-FROM ubuntu:14.04
-MAINTAINER chetan@ciena.com
-RUN apt-get update  && \
-    apt-get install -y git git-core autoconf automake autotools-dev pkg-config \
-        make gcc g++ libtool libc6-dev cmake libpcap-dev libxerces-c2-dev  \
-        unzip libpcre3-dev flex bison libboost-dev \
-        python python-pip python-setuptools python-scapy tcpdump doxygen doxypy wget \
-        openvswitch-common openvswitch-switch \
-        python-twisted python-sqlite sqlite3 python-pexpect telnet arping isc-dhcp-server \
-        python-paramiko python-maas-client python-keystoneclient python-neutronclient python-glanceclient \
-        python-novaclient python-dev libffi-dev libssl-dev
-RUN easy_install nose
-RUN mkdir -p /root/ovs
-WORKDIR /root
-RUN wget http://openvswitch.org/releases/openvswitch-2.5.0.tar.gz -O /root/ovs/openvswitch-2.5.0.tar.gz && \
-(cd /root/ovs && tar zxpvf openvswitch-2.5.0.tar.gz && \
- cd openvswitch-2.5.0 && \
- ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install)
-RUN service openvswitch-switch restart || /bin/true
-RUN pip install scapy==2.3.2 scapy-ssl_tls==1.2.2 monotonic configObj docker-py pyyaml nsenter pyroute2 netaddr python-daemon
-RUN pip install -U cryptography
-RUN pip install -U paramiko
-RUN mv /usr/sbin/tcpdump /sbin/
-RUN ln -sf /sbin/tcpdump /usr/sbin/tcpdump
-RUN mv /usr/sbin/dhcpd /sbin/
-RUN ln -sf /sbin/dhcpd /usr/sbin/dhcpd
-RUN mv /sbin/dhclient /usr/sbin/
-RUN ln -sf /usr/sbin/dhclient /sbin/dhclient
-WORKDIR /root
-RUN wget -nc http://de.archive.ubuntu.com/ubuntu/pool/main/b/bison/bison_2.5.dfsg-2.1_amd64.deb \
-         http://de.archive.ubuntu.com/ubuntu/pool/main/b/bison/libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN sudo dpkg -i bison_2.5.dfsg-2.1_amd64.deb libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN rm bison_2.5.dfsg-2.1_amd64.deb libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN wget -nc http://www.nbee.org/download/nbeesrc-jan-10-2013.zip && \
-    unzip nbeesrc-jan-10-2013.zip && \
-    cd nbeesrc-jan-10-2013/src && cmake . && make && \
-    cp ../bin/libn*.so /usr/local/lib && ldconfig && \
-    cp -R ../include/* /usr/include/
-WORKDIR /root
-RUN git clone https://github.com/CPqD/ofsoftswitch13.git && \
-    cd ofsoftswitch13 && \
-    ./boot.sh && \
-    ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && \
-    make && make install
-CMD ["/bin/bash"]
diff --git a/Vagrantfile b/Vagrantfile
deleted file mode 100644
index fb22eb4..0000000
--- a/Vagrantfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure(2) do |config|
-
-  if (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil
-    config.vm.synced_folder ".", "/cord-tester", mount_options: ["dmode=700,fmode=600"]
-  else
-    config.vm.synced_folder ".", "/cord-tester"
-  end
-
-  config.vm.define "cordtest" do |d|
-    d.vm.box = "ubuntu/trusty64"
-    d.vm.hostname = "cordtest"
-    d.vm.network "private_network", ip: "10.100.198.202"
-    d.vm.provision :shell, path: "src/test/setup/prerequisites.sh"
-    d.vm.provider "virtualbox" do |v|
-      v.memory = 3000
-    end
-  end
-
-  if Vagrant.has_plugin?("vagrant-cachier")
-    config.cache.scope = :box
-  end
-
-end
diff --git a/apply_license.sh b/apply_license.sh
deleted file mode 100755
index e8fb4b4..0000000
--- a/apply_license.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#!/usr/bin/env bash
-for f in `find . -name "*.py"`; do
-    if ! grep -q Copyright $f; then
-        cat COPYRIGHT.txt $f > $f.license.py
-        mv $f.license.py $f
-        if grep -q "^\#\!/usr/bin" $f; then
-          #prepend shebang for python
-          sed -i -e '/^\#\!\/usr\/bin/d' -e '1i\#\!/usr/bin/env python' $f
-          chmod +x $f
-        fi
-    fi
-done
diff --git a/build-onos-docker.sh b/build-onos-docker.sh
deleted file mode 100755
index 6b4e693..0000000
--- a/build-onos-docker.sh
+++ /dev/null
@@ -1,160 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#!/usr/bin/env bash
-
-function show_help {
-    echo "Usage: ${0#*/} -h | this help -o <onos source path> -t <onos docker tag> -p <onos package> -b | build onos package -u |update onos source"
-    exit 1
-}
-
-OPTIND=1
-onos_src_dir="$HOME/onos"
-onos_tag="test/onos:clustertest"
-onos_package=
-onos_build=0
-onos_update=0
-
-while getopts "h?o:t:p:bu" opt; do
-    case "$opt" in
-        h|\?)
-            show_help
-            ;;
-        o)
-            onos_src_dir=$OPTARG
-            ;;
-        p)
-            onos_package=$OPTARG
-            ;;
-        t)
-            onos_tag=$OPTARG
-            ;;
-        b)
-            onos_build=1
-            ;;
-        u)
-            onos_update=1
-            ;;
-        *)
-            show_help
-            ;;
-    esac
-done
-
-shift $((OPTIND-1))
-if [ $# -gt 0 ]; then
-    echo "Invalid arguments"
-    show_help
-fi
-mydir=$(dirname $(realpath $0))
-if [ x"$onos_package" = "x" ]; then
-    if [ ! -d $onos_src_dir ]; then
-        onos_build=1
-    fi
-    onos_package=$onos_src_dir/buck-out/gen/tools/package/onos-package/onos.tar.gz
-fi
-
-function build_onos {
-    if [ ! -f $mydir/Dockerfile.onos-builder ]; then
-        echo "Dockerfile.onos-builder not found. Copy this file from cord-tester project before resuming the build"
-        exit 127
-    fi
-    docker images | grep ^cord-tester-onos-builder || docker build -t cord-tester-onos-builder:latest -f $mydir/Dockerfile.onos-builder $mydir
-    docker run -v $mydir:/root/cord-tester --rm cord-tester-onos-builder:latest
-    return $?
-}
-
-#if onos package is not built, then exit
-if [ $onos_build -eq 1 ]; then
-    if [ ! -d $onos_src_dir ]; then
-        build_onos
-        ret=$?
-        if [ $ret -ne 0 ]; then
-            echo "Failed to build ONOS. Exiting"
-            exit 127
-        fi
-        onos_package=$mydir/onos.tar.gz
-    else
-      if [ $onos_update -eq 1 ]; then
-          echo "Updating ONOS source"
-          ( cd $onos_src_dir && git pull --ff-only origin master || git clone http://github.com/opennetworkinglab/onos.git . )
-      fi
-      ( cd $onos_src_dir && tools/build/onos-buck build onos ) && echo "ONOS build success" || {
-        echo "ONOS build failure. Exiting ..." && exit 1
-      }
-      onos_package=$onos_src_dir/buck-out/gen/tools/package/onos-package/onos.tar.gz
-    fi
-fi
-
-if [ ! -f $onos_package ]; then
-    echo "ONOS package $onos_package does not exist. Exiting ..."
-    exit 1
-fi
-
-if [ $onos_package != $mydir/onos.tar.gz ]; then
-    cp -v $onos_package $mydir/onos.tar.gz
-fi
-
-function finish {
-    rm -f onos.tar.gz
-    rm -f Dockerfile.cord-tester
-}
-
-trap finish EXIT
-
-#create a ONOS docker file
-cat > $mydir/Dockerfile.cord-tester <<EOF
-FROM onosproject/onos:latest
-
-MAINTAINER Ali Al-Shabibi <ali@onlab.us>
-
-# Add Java 8 repository
-# Set the environment variables
-ENV HOME /root
-ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
-ENV ONOS_ROOT /src/onos
-ENV KARAF_VERSION 3.0.8
-ENV KARAF_ROOT /root/onos/apache-karaf-3.0.8
-ENV KARAF_LOG /root/onos/apache-karaf-3.0.8/data/log/karaf.log
-ENV BUILD_NUMBER docker
-ENV PATH \$PATH:\$KARAF_ROOT/bin
-
-#Download and Build ONOS
-# Change to /root directory
-WORKDIR /root
-COPY ./onos.tar.gz /tmp
-#Install ONOS
-
-RUN rm -rf onos && mkdir onos && \
-   mv /tmp/onos.tar.gz . && \
-   tar -xf onos.tar.gz -C onos --strip-components=1 && \
-   rm -rf onos.tar.gz
-
-
-# Ports
-# 6653 - OpenFlow
-# 8181 - GUI
-# 8101 - ONOS CLI
-# 9876 - ONOS CLUSTER COMMUNICATION
-EXPOSE 6653 8181 8101 9876 5005
-
-# Get ready to run command
-WORKDIR /root/onos
-ENTRYPOINT ["./bin/onos-service"]
-EOF
-
-#Now build the docker image
-docker build -t $onos_tag -f $mydir/Dockerfile.cord-tester $mydir
diff --git a/build.gradle b/build.gradle
deleted file mode 100644
index f4979ba..0000000
--- a/build.gradle
+++ /dev/null
@@ -1,299 +0,0 @@
-
-/*
- * Copyright 2017-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import org.opencord.gradle.rules.*
-import org.yaml.snakeyaml.Yaml
-
-allprojects {
-    apply plugin: 'base'
-    apply plugin: 'de.gesellix.docker'
-    //apply plugin: 'com.tmiyamon.config'
-
-    docker {
-        // dockerHost = System.env.DOCKER_HOST ?: 'unix:///var/run/docker.sock'
-        // dockerHost = System.env.DOCKER_HOST ?: 'https://192.168.99.100:2376'
-        // certPath = System.getProperty('docker.cert.path') ?: "${System.getProperty('user.home')}/.docker/machine/machines/default"
-        // authConfigPlain = [
-        //   "username"       : "joe",
-        //   "password"       : "some-pw-as-needed",
-        //   "email"          : "joe@acme.com",
-        //   "serveraddress"  : "https://index.docker.io/v1/"
-        //  ]
-    }
-}
-
-ext {
-    // Deployment target config file (yaml format); this can be overwritten from the command line
-    // using the -PdeployConfig=<file-path> syntax.
-    deployConfig = project.hasProperty('deployConfig') ? project.getProperty('deployConfig') : './config/default.yml'
-
-    println "Using deployment config: $deployConfig"
-    File configFile = new File(deployConfig)
-    def yaml = new Yaml()
-    config = yaml.load(configFile.newReader())
-
-    // Upstream registry to simplify filling out the comps table below
-    upstreamReg = project.hasProperty('upstreamReg') ? project.getProperty('upstreamReg') : 'docker.io'
-
-    // Target registry to be used to publish docker images needed for deployment
-    targetReg = project.hasProperty('targetReg')
-        ? project.getProperty('targetReg')
-        : config.docker && config.docker.registry
-            ? config.docker.registry
-            : config.seedServer.ip
-                ? config.seedServer.ip + ":5000"
-                : 'localhost:5000'
-
-    // The tag used to tag the docker images push to the target registry
-    targetTag = project.hasProperty('targetTag')
-        ? project.getProperty('targetTag')
-        : config.docker && config.docker.imageVersion
-            ? config.docker.imageVersion
-            : 'candidate'
-
-    cordTesterPath = project.hasProperty('cordTesterPath') ? project.getProperty('cordTesterPath') : './src/test/setup'
-
-    dockerPath = project.hasProperty('dockerPath') ? project.getProperty('dockerPath') : '/usr/bin'
-
-    cordTesterImages = [ 'cordtest/radius:candidate' : 'Dockerfile.radius', 'cordtest/quagga:candidate' : 'Dockerfile.quagga', 'cordtest/nose:candidate' : 'Dockerfile.tester' ]
-
-}
-
-List.metaClass.asParam = { prefix, sep ->
-  if (delegate.size() == 0) {
-    ""
-  }
-  String result = "--" + prefix + "="
-  String p = ""
-  delegate.each {
-    result += p + "${it}"
-    p = sep
-  }
-  result
-}
-
-List.metaClass.p = { value, name ->
-  if (value != null && value != "") {
-      delegate << name + "=" + value
-  } else {
-      delegate
-  }
-}
-
-List.metaClass.p = { spec ->
-  if (spec != null && spec != "") {
-      delegate += spec
-  } else {
-      delegate
-  }
-}
-
-// ~~~~~~~~~~~~~~~~~~~ Global tasks ~~~~~~~~~~~~~~~~~~~~~~~
-
-// To be used to fetch upstream binaries, clone repos, etc.
-task fetch {
-    //commandLine "$cordTesterPath/onos_pull.sh", 'latest'
-}
-
-// To be used to generate all needed binaries that need to be present on the target
-// as docker images in the local docker runner.
-task buildImages {
-    // ...
-    cordTesterImages.each { tag, dockerfile ->
-        println "Building Docker image ${tag} using ${dockerfile}"
-        exec {
-             executable "$dockerPath/docker"
-             args "build", "-t", "${tag}", "-f", "${dockerfile}", "."
-        }
-    }
-}
-
-task buildRadiusImage(type: Exec) {
-    commandLine "$dockerPath/docker", 'build', '-t', 'cordtest/radius', '-f', 'Dockerfile.radius', '.'
-}
-
-task tagRadiusImage(type: Exec) {
-   dependsOn buildRadiusImage
-   commandLine "$dockerPath/docker", 'tag', 'cordtest/radius', "$targetReg/cordtest/radius:$targetTag"
-}
-
-task publishRadiusImage(type: Exec) {
-    dependsOn tagRadiusImage
-    commandLine "$dockerPath/docker", 'push', "$targetReg/cordtest/radius:$targetTag"
-}
-
-task buildQuaggaImage(type: Exec) {
-    commandLine "$dockerPath/docker", 'build', '-t', 'cordtest/quagga', '-f', 'Dockerfile.quagga', '.'
-}
-
-task tagQuaggaImage(type: Exec) {
-   dependsOn buildQuaggaImage
-   commandLine "$dockerPath/docker", 'tag', 'cordtest/quagga', "$targetReg/cordtest/quagga:$targetTag"
-}
-
-task publishQuaggaImage(type: Exec) {
-    dependsOn tagQuaggaImage
-    commandLine "$dockerPath/docker", 'push', "$targetReg/cordtest/quagga:$targetTag"
-}
-
-task buildTesterImage(type: Exec) {
-    commandLine "$dockerPath/docker", 'build', '-t', 'cordtest/nose', '-f', 'Dockerfile.tester', '.'
-}
-
-task tagTesterImage(type: Exec) {
-   dependsOn buildTesterImage
-   commandLine "$dockerPath/docker", 'tag', 'cordtest/nose', "$targetReg/cordtest/nose:$targetTag"
-}
-
-task publishTesterImage(type: Exec) {
-    dependsOn tagTesterImage
-    commandLine "$dockerPath/docker", 'push', "$targetReg/cordtest/nose:$targetTag"
-}
-
-// Publish image(s) built during the build step into targetReg registry using the targetTag
-// tag. See maas subproject for examples on how to do this.
-task publishImages {
-     dependsOn publishTesterImage
-     dependsOn publishQuaggaImage
-     dependsOn publishRadiusImage
-}
-
-task publish {
-    dependsOn publishImages
-}
-
-task deployBase (type: Exec) {
-    executable = "ansible-playbook"
-    args = ["-i", config.seedServer.ip + ',']
-
-    if ( config.seedServer.ansible_user != null && config.seedServer.ansible_user != "" ) {
-        args = args << "--user=$config.seedServer.ansible_user"
-    }
-
-
-    if ( config.debug ) {
-        args = args << "-vvvv"
-    }
-
-    def extraVars = []
-    if (config.seedServer) {
-        extraVars = extraVars.p(config.seedServer.extraVars)
-            .p(config.seedServer.ansible_ssh_pass, "ansible_ssh_pass")
-            .p(config.seedServer.ansible_sudo_pass, "ansible_sudo_pass")
-            .p(config.seedServer.fabric_ip, "fabric_ip")
-	    .p(config.seedServer.management_ip, "management_ip")
-            .p(config.seedServer.management_gw, "management_gw")
-            .p(config.seedServer.management_network, "management_network")
-	    .p(config.seedServer.management_iface, "management_iface")
-	    .p(config.seedServer.external_ip, "external_ip")
-            .p(config.seedServer.external_gw, "external_gw")
-            .p(config.seedServer.external_network, "external_network")
-            .p(config.seedServer.external_iface, "external_iface")
-	    .p(config.seedServer.fabric_ip, "fabric_ip")
-	    .p(config.seedServer.fabric_network, "fabric_network")
-	    .p(config.seedServer.fabric_iface, "fabric_iface")
-            .p(config.seedServer.domain, "domain")
-            .p(config.seedServer.virtualbox_support, "virtualbox_support")
-	    .p(config.seedServer.power_helper_user, "power_helper_user")
-	    .p(config.seedServer.power_helper_host, "power_helper_host")
-            .p(config.seedServer.ansible_ssh_port, "ansible_ssh_port")
-    }
-
-    if (config.otherServers) {
-        extraVars = extraVars.p(config.otherServers.location, "prov_location")
-        .p(config.otherServers.rolesPath, "prov_role_path")
-        .p(config.otherServers.role, "prov_role")
-    }
-
-    if (config.docker) {
-        extraVars = extraVars.p(config.docker.registry, "docker_registry")
-            .p(config.docker.imageVersion, "docker_image_version")
-    }
-
-    def skipTags = [].p(config.seedServer.skipTags)
-
-    args = args.p(skipTags.asParam("skip-tags", ",")).p(extraVars.asParam("extra-vars", " ")) << "cord-tester-deploy.yml"
-}
-
-task verify (type: Exec) {
-    executable = "ansible-playbook"
-    args = ["-i", config.seedServer.ip + ',']
-
-    if ( config.seedServer.ansible_user != null && config.seedServer.ansible_user != "" ) {
-        args = args << "--user=$config.seedServer.ansible_user"
-    }
-
-
-    if ( config.debug ) {
-        args = args << "-vvvv"
-    }
-
-    def extraVars = []
-    if (config.seedServer) {
-        extraVars = extraVars.p(config.seedServer.extraVars)
-            .p(config.seedServer.ansible_ssh_pass, "ansible_ssh_pass")
-            .p(config.seedServer.ansible_sudo_pass, "ansible_sudo_pass")
-            .p(config.seedServer.fabric_ip, "fabric_ip")
-	    .p(config.seedServer.management_ip, "management_ip")
-            .p(config.seedServer.management_gw, "management_gw")
-            .p(config.seedServer.management_network, "management_network")
-	    .p(config.seedServer.management_iface, "management_iface")
-	    .p(config.seedServer.external_ip, "external_ip")
-            .p(config.seedServer.external_gw, "external_gw")
-            .p(config.seedServer.external_network, "external_network")
-            .p(config.seedServer.external_iface, "external_iface")
-	    .p(config.seedServer.fabric_ip, "fabric_ip")
-	    .p(config.seedServer.fabric_network, "fabric_network")
-	    .p(config.seedServer.fabric_iface, "fabric_iface")
-            .p(config.seedServer.domain, "domain")
-            .p(config.seedServer.virtualbox_support, "virtualbox_support")
-	    .p(config.seedServer.power_helper_user, "power_helper_user")
-	    .p(config.seedServer.power_helper_host, "power_helper_host")
-            .p(config.seedServer.ansible_ssh_port, "ansible_ssh_port")
-    }
-
-    if (config.otherServers) {
-        extraVars = extraVars.p(config.otherServers.location, "prov_location")
-        .p(config.otherServers.rolesPath, "prov_role_path")
-        .p(config.otherServers.role, "prov_role")
-    }
-
-    if (config.docker) {
-        extraVars = extraVars.p(config.docker.registry, "docker_registry")
-            .p(config.docker.imageVersion, "docker_image_version")
-    }
-
-    def skipTags = [].p(config.seedServer.skipTags)
-
-    args = args.p(skipTags.asParam("skip-tags", ",")).p(extraVars.asParam("extra-vars", " ")) << "cord-tester-verify.yml"
-}
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
deleted file mode 100644
index e9313e8..0000000
--- a/buildSrc/build.gradle
+++ /dev/null
@@ -1,49 +0,0 @@
-
-/*
- * Copyright 2017-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-apply plugin: 'groovy'
-
-repositories {
-    // maven { url 'https://repo.gradle.org/gradle/libs' }
-    maven { url 'https://plugins.gradle.org/m2/' }
-    // mavenCentral()
-}
-
-dependencies {
-    compile gradleApi()
-    compile localGroovy()
-    compile 'de.gesellix:gradle-docker-plugin:2016-05-05T13-15-11'
-    compile 'org.yaml:snakeyaml:1.10'
-    //compile 'gradle.plugin.com.tmiyamon:gradle-config:0.2.1'
-}
diff --git a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerFetchRule.groovy b/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerFetchRule.groovy
deleted file mode 100644
index a9bb91b..0000000
--- a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerFetchRule.groovy
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.opencord.gradle.rules
-
-import org.gradle.api.Rule
-import de.gesellix.gradle.docker.tasks.DockerPullTask
-
-
-/**
- * Gradle Rule class to fetch a docker image
- */
-class DockerFetchRule implements Rule {
-
-    def project
-
-    DockerFetchRule(project) {
-        this.project = project
-    }
-
-    String getDescription() {
-        'Rule Usage: fetch<component-name>'
-    }
-
-    void apply(String taskName) {
-        if (taskName.startsWith('fetch')) {
-            project.task(taskName, type: DockerPullTask) {
-                ext.compName = taskName - 'fetch'
-                def spec = project.comps[ext.compName]
-                imageName = spec.name + '@' + spec.digest
-            }
-        }
-    }
-}
diff --git a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerPublishRule.groovy b/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerPublishRule.groovy
deleted file mode 100644
index a1d8164..0000000
--- a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerPublishRule.groovy
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.opencord.gradle.rules
-
-import org.gradle.api.Rule
-import de.gesellix.gradle.docker.tasks.DockerPushTask
-
-
-/**
- * Gradle Rule class to publish (push) a docker image to a private repo
- */
-class DockerPublishRule implements Rule {
-
-    def project
-
-    DockerPublishRule(project) {
-        this.project = project
-    }
-
-    String getDescription() {
-        'Rule Usage: publish<component-name>'
-    }
-
-    void apply(String taskName) {
-        if (taskName.startsWith('publish')) {
-            project.task(taskName, type: DockerPushTask) {
-                ext.compName = taskName - 'publish'
-                println "Publish rule: $taskName + $compName"
-                def tagTask = "tag$compName"
-                println "Tagtask: $tagTask"
-                dependsOn tagTask
-                def spec = project.comps[ext.compName]
-                repositoryName = spec.name + ':' + project.targetTag
-                registry = project.targetReg
-            }
-        }
-    }
-}
diff --git a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerTagRule.groovy b/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerTagRule.groovy
deleted file mode 100644
index 474e16d..0000000
--- a/buildSrc/src/main/groovy/org/opencord/gradle/rules/DockerTagRule.groovy
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.opencord.gradle.rules
-
-import org.gradle.api.Rule
-import de.gesellix.gradle.docker.tasks.DockerTagTask
-
-
-/**
- * Gradle Rule class to tag a docker image
- */
-class DockerTagRule implements Rule {
-
-    def project
-
-    DockerTagRule(project) {
-        this.project = project
-    }
-
-    String getDescription() {
-        'Rule Usage: tag<component-name>'
-    }
-
-    void apply(String taskName) {
-        if (taskName.startsWith('tag') && !taskName.equals('tag')) {
-            project.task(taskName, type: DockerTagTask) {
-                ext.compName = taskName - 'tag'
-                def spec = project.comps[compName]
-                imageId = spec.name + '@' + spec.digest
-                tag = compName + ':' + project.targetTag
-            }
-        }
-    }
-}
diff --git a/buildSrc/src/main/groovy/org/opencord/gradle/rules/GitSubmoduleUpdateRule.groovy b/buildSrc/src/main/groovy/org/opencord/gradle/rules/GitSubmoduleUpdateRule.groovy
deleted file mode 100644
index 3b46424..0000000
--- a/buildSrc/src/main/groovy/org/opencord/gradle/rules/GitSubmoduleUpdateRule.groovy
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2012 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.opencord.gradle.rules
-
-import org.gradle.api.Rule
-import org.gradle.api.tasks.Exec
-
-
-/**
- * Gradle Rule class to fetch a docker image
- */
-class GitSubmoduleUpdateRule implements Rule {
-
-    def project
-
-    GitSubmoduleUpdateRule(project) {
-        this.project = project
-    }
-
-    String getDescription() {
-        'Rule Usage: gitupdate<component-name>'
-    }
-
-    void apply(String taskName) {
-        if (taskName.startsWith('gitupdate')) {
-            project.task(taskName, type: Exec) {
-                ext.compName = taskName - 'gitupdate'
-                def spec = project.comps[ext.compName]
-                workingDir = '.'
-                commandLine '/usr/bin/git', 'submodule', 'update', '--init', '--recursive', spec.componentDir
-            }
-        }
-    }
-}
diff --git a/config/default.yml b/config/default.yml
deleted file mode 100644
index 20ac754..0000000
--- a/config/default.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Deployment configuration for VirtualBox based head node.
-#
-# This deployment configuration can be utilized with the head node created
-# via `vargrant up headnode` from the gerrit.opencord.org/maas repository.
----
-seedServer:
-  ip: '10.100.198.201'
-
-  # User name and password used by Ansible to connect to the host for remote
-  # provisioning
-  user: 'vagrant'
-  password: 'vagrant'
-
-  # Specifies tasks within the head node provisioning not to execute, including:
-  #
-  # switch_support -   don't download the switch ONL images as there are no
-  #                    switches in this configuration
-  # interface_config - don't update the network configuration of the headnode
-  #                    as it is configured by vagrant to the proper settings
-  skipTags:
-    - 'switch_support'
-    - 'interface_config'
-
-  management_ip: '10.1.0.1/24'
-  management_iface: 'eth2'
-  external_iface: 'eth0'
-  management_network: '10.1.0.0/24'
-
-  # Specifies the extra settings required for this configuration
-  #
-  # virtualbox_support - install support for managing virtual box based
-  #                      compute nodes
-  virtualbox_support: 1
-  power_helper_user: 'cord'
-
-docker:
-  imageVersion: 'candidate'
-  registry: 'docker-registry:5000'
-
-otherServers:
-  # Specifies the configuration for dynamically added compute nodes
-  location: 'http://gerrit.opencord.org/maas'
-  rolesPath: 'roles'
-  role: 'compute-node'
-  fabric:
-    network: '10.1.1.1/24'
-    range_low: '10.1.1.2'
-    range_high: '10.1.1.253'
diff --git a/config/develop.yml b/config/develop.yml
deleted file mode 100644
index 4fb7197..0000000
--- a/config/develop.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Deployment configuration for VirtualBox based head node.
-#
-# This deployment configuration can be utilized with the head node created
-# via `vargrant up headnode` from the gerrit.opencord.org/maas repository.
----
-seedServer:
-  ip: '10.100.198.202'
-
-  # User name and password used by Ansible to connect to the host for remote
-  # provisioning
-  user: 'vagrant'
-  password: 'vagrant'
-
-  # Specifies tasks within the head node provisioning not to execute, including:
-  #
-  # switch_support -   don't download the switch ONL images as there are no 
-  #                    switches in this configuration
-  # interface_config - don't update the network configuration of the headnode
-  #                    as it is configured by vagrant to the proper settings
-  skipTags:
-    - 'switch_support'
-    - 'interface_config'
-
-  # Specifies the extra settings required for this configuration
-  #
-  # virtualbox_support - install support for managing virtual box based
-  #                      compute nodes
-  extraVars:
-    - 'virtualbox_support=1'
-    - 'external_iface=eth0'
-
-otherServers:
-  # Specifies the configuration for dynamically added compute nodes
-  location: 'http://gerrit.opencord.org/maas'
-  rolesPath: 'roles'
-  role: 'compute-node'
-
-docker:
-  registry: '10.100.198.200:5000/opencord'
-  imageVersion: 'candidate'
diff --git a/config/pod5.yml b/config/pod5.yml
deleted file mode 100644
index d5e5a6d..0000000
--- a/config/pod5.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Deployment configuration for a phyical hardware POD
----
-seedServer:
-  ip: '47.135.132.21'
-  # User name and password used by Ansible to connect to the host for remote
-  # provisioning
-  user: 'ubuntu'
-  password: 'ubuntu'
-
-  # Network address information for the head node:
-  #
-  # fabric_ip     - the IP address and mask bits to be used to configure the network
-  #                 interface connected to the leaf - spine fabric
-  #
-  # management_ip - the IP address and mask bits to be used to configure the network
-  #                 interface connecting the head node to the POD internal
-  #                 management network. The head node will deliver DHCP addresses to 
-  #                 the other compute nodes over this interface
-  #
-  # external_ip   - the IP address and mask bits to be used to configure the network
-  #                 interface connecting the head node (and the POD) to the 
-  #                 Internet. All traffic in the POD to external hosts will be 
-  #                 NAT-ed through this interface
-  fabric_ip: '10.6.1.1/24'
-  management_ip: '10.6.0.1/24'
-  external_ip: '47.135.132.21/24'
-
-otherNodes:
-  # Experimental
-  #
-  # Specifies the subnet and address range that will be used to allocate IP addresses
-  # to the compute nodes as they are deployed into the POD.
-  fabric:
-    network: 10.6.1.1/24
-    range_low: 10.6.1.2
-    range_high: 10.6.1.253
diff --git a/cord-tester-deploy.yml b/cord-tester-deploy.yml
deleted file mode 100644
index 1fe6474..0000000
--- a/cord-tester-deploy.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: localhost
-  tasks:
-    - name: Archive cord tester files
-      local_action: shell tar -cvzf /tmp/cord-tester.tar.gz ../cord-tester --exclude=.git
-
-- hosts: all
-  tasks:
-    - name: Unarchive cord tester files
-      unarchive:
-        src=/tmp/cord-tester.tar.gz
-        dest=/home/{{ ansible_user }}
-        owner={{ ansible_user }}
-        group={{ ansible_user }}
-
-    - name: Cleanup remote archive files
-      file:
-        path=/home/{{ ansible_user }}/.ansible state=absent
-
-- hosts: localhost
-  tasks:
-    - name: Cleanup local archive files
-      file:
-        path=/tmp/cord-tester.tar.gz state=absent
-
-- name: Run prerequisites on head node
-  hosts: all
-  roles:
-    - prereq
diff --git a/cord-tester-verify.yml b/cord-tester-verify.yml
deleted file mode 100644
index 2259c82..0000000
--- a/cord-tester-verify.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: all
-  sudo: True
-  tasks:
-    - name: Run fabric tests using cord-tester
-      become: yes
-      shell: "{{ item }}"
-      args:
-        chdir: "/home/{{ ansible_user }}/cord-tester/src/test/setup"
-      with_items:
-        - ./cord-test.py run -e onos-fabric/onos-fabric --no-switch --prefix=docker-registry:5000 -t fabric
-        #- ./cord-test.py run --prefix=docker-registry:5000 --onos=docker-registry:5000/onosproject/onos:candidate --olt --start-switch -t cordSubscriber:subscriber_exchange.cord_test_subscriber_join_jump
-        - ./cord-test.py cleanup --olt --prefix=docker-registry:5000 --onos=docker-registry:5000/onosproject/onos:candidate
-        - docker kill cord-onos 2>/dev/null || true
-        - docker kill cord-radius 2>/dev/null || true
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 2c6137b..0000000
--- a/gradle/wrapper/gradle-wrapper.jar
+++ /dev/null
Binary files differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
deleted file mode 100644
index 4057c5d..0000000
--- a/gradle/wrapper/gradle-wrapper.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-#Wed Jun 01 15:52:16 PDT 2016
-distributionBase=GRADLE_USER_HOME
-distributionPath=wrapper/dists
-zipStoreBase=GRADLE_USER_HOME
-zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-2.12-bin.zip
diff --git a/gradlew b/gradlew
deleted file mode 100755
index 9d82f78..0000000
--- a/gradlew
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env bash
-
-##############################################################################
-##
-##  Gradle start up script for UN*X
-##
-##############################################################################
-
-# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS=""
-
-APP_NAME="Gradle"
-APP_BASE_NAME=`basename "$0"`
-
-# Use the maximum available, or set MAX_FD != -1 to use that value.
-MAX_FD="maximum"
-
-warn ( ) {
-    echo "$*"
-}
-
-die ( ) {
-    echo
-    echo "$*"
-    echo
-    exit 1
-}
-
-# OS specific support (must be 'true' or 'false').
-cygwin=false
-msys=false
-darwin=false
-case "`uname`" in
-  CYGWIN* )
-    cygwin=true
-    ;;
-  Darwin* )
-    darwin=true
-    ;;
-  MINGW* )
-    msys=true
-    ;;
-esac
-
-# Attempt to set APP_HOME
-# Resolve links: $0 may be a link
-PRG="$0"
-# Need this for relative symlinks.
-while [ -h "$PRG" ] ; do
-    ls=`ls -ld "$PRG"`
-    link=`expr "$ls" : '.*-> \(.*\)$'`
-    if expr "$link" : '/.*' > /dev/null; then
-        PRG="$link"
-    else
-        PRG=`dirname "$PRG"`"/$link"
-    fi
-done
-SAVED="`pwd`"
-cd "`dirname \"$PRG\"`/" >/dev/null
-APP_HOME="`pwd -P`"
-cd "$SAVED" >/dev/null
-
-CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
-
-# Determine the Java command to use to start the JVM.
-if [ -n "$JAVA_HOME" ] ; then
-    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
-        # IBM's JDK on AIX uses strange locations for the executables
-        JAVACMD="$JAVA_HOME/jre/sh/java"
-    else
-        JAVACMD="$JAVA_HOME/bin/java"
-    fi
-    if [ ! -x "$JAVACMD" ] ; then
-        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
-
-Please set the JAVA_HOME variable in your environment to match the
-location of your Java installation."
-    fi
-else
-    JAVACMD="java"
-    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-
-Please set the JAVA_HOME variable in your environment to match the
-location of your Java installation."
-fi
-
-# Increase the maximum file descriptors if we can.
-if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
-    MAX_FD_LIMIT=`ulimit -H -n`
-    if [ $? -eq 0 ] ; then
-        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
-            MAX_FD="$MAX_FD_LIMIT"
-        fi
-        ulimit -n $MAX_FD
-        if [ $? -ne 0 ] ; then
-            warn "Could not set maximum file descriptor limit: $MAX_FD"
-        fi
-    else
-        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
-    fi
-fi
-
-# For Darwin, add options to specify how the application appears in the dock
-if $darwin; then
-    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
-fi
-
-# For Cygwin, switch paths to Windows format before running java
-if $cygwin ; then
-    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
-    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
-    JAVACMD=`cygpath --unix "$JAVACMD"`
-
-    # We build the pattern for arguments to be converted via cygpath
-    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
-    SEP=""
-    for dir in $ROOTDIRSRAW ; do
-        ROOTDIRS="$ROOTDIRS$SEP$dir"
-        SEP="|"
-    done
-    OURCYGPATTERN="(^($ROOTDIRS))"
-    # Add a user-defined pattern to the cygpath arguments
-    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
-        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
-    fi
-    # Now convert the arguments - kludge to limit ourselves to /bin/sh
-    i=0
-    for arg in "$@" ; do
-        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
-        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
-
-        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
-            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
-        else
-            eval `echo args$i`="\"$arg\""
-        fi
-        i=$((i+1))
-    done
-    case $i in
-        (0) set -- ;;
-        (1) set -- "$args0" ;;
-        (2) set -- "$args0" "$args1" ;;
-        (3) set -- "$args0" "$args1" "$args2" ;;
-        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
-        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
-        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
-        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
-        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
-        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
-    esac
-fi
-
-# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
-function splitJvmOpts() {
-    JVM_OPTS=("$@")
-}
-eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
-JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
-
-exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
diff --git a/gradlew.bat b/gradlew.bat
deleted file mode 100644
index 72d362d..0000000
--- a/gradlew.bat
+++ /dev/null
@@ -1,90 +0,0 @@
-@if "%DEBUG%" == "" @echo off

-@rem ##########################################################################

-@rem

-@rem  Gradle startup script for Windows

-@rem

-@rem ##########################################################################

-

-@rem Set local scope for the variables with windows NT shell

-if "%OS%"=="Windows_NT" setlocal

-

-@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.

-set DEFAULT_JVM_OPTS=

-

-set DIRNAME=%~dp0

-if "%DIRNAME%" == "" set DIRNAME=.

-set APP_BASE_NAME=%~n0

-set APP_HOME=%DIRNAME%

-

-@rem Find java.exe

-if defined JAVA_HOME goto findJavaFromJavaHome

-

-set JAVA_EXE=java.exe

-%JAVA_EXE% -version >NUL 2>&1

-if "%ERRORLEVEL%" == "0" goto init

-

-echo.

-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

-echo.

-echo Please set the JAVA_HOME variable in your environment to match the

-echo location of your Java installation.

-

-goto fail

-

-:findJavaFromJavaHome

-set JAVA_HOME=%JAVA_HOME:"=%

-set JAVA_EXE=%JAVA_HOME%/bin/java.exe

-

-if exist "%JAVA_EXE%" goto init

-

-echo.

-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%

-echo.

-echo Please set the JAVA_HOME variable in your environment to match the

-echo location of your Java installation.

-

-goto fail

-

-:init

-@rem Get command-line arguments, handling Windows variants

-

-if not "%OS%" == "Windows_NT" goto win9xME_args

-if "%@eval[2+2]" == "4" goto 4NT_args

-

-:win9xME_args

-@rem Slurp the command line arguments.

-set CMD_LINE_ARGS=

-set _SKIP=2

-

-:win9xME_args_slurp

-if "x%~1" == "x" goto execute

-

-set CMD_LINE_ARGS=%*

-goto execute

-

-:4NT_args

-@rem Get arguments from the 4NT Shell from JP Software

-set CMD_LINE_ARGS=%$

-

-:execute

-@rem Setup the command line

-

-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

-

-@rem Execute Gradle

-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

-

-:end

-@rem End local scope for the variables with windows NT shell

-if "%ERRORLEVEL%"=="0" goto mainEnd

-

-:fail

-rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of

-rem the _cmd.exe /c_ return code!

-if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1

-exit /b 1

-

-:mainEnd

-if "%OS%"=="Windows_NT" endlocal

-

-:omega

diff --git a/src/test/acl/__init__.py b/src/test/acl/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/acl/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/acl/aclTest.json b/src/test/acl/aclTest.json
deleted file mode 100644
index be46387..0000000
--- a/src/test/acl/aclTest.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "GATEWAY": "192.168.10.50",
-    "INGRESS_PORT" :"1",
-    "EGRESS_PORT" : "2",
-    "ingress_iface" :"1",
-    "egress_iface" : "2",
-    "MAX_PORTS":"100",
-    "CURRENT_PORT_NUM" :"egress_iface",
-    "ACL_SRC_IP":"192.168.20.3/32",
-    "ACL_DST_IP":"192.168.30.2/32",
-    "ACL_SRC_IP_RULE_2":"192.168.40.3/32",
-    "ACL_DST_IP_RULE_2":"192.168.50.2/32",
-    "ACL_SRC_IP_PREFIX_24":"192.168.20.3/24",
-    "ACL_DST_IP_PREFIX_24":"192.168.30.2/24",
-    "HOST_DST_IP":"192.168.30.0/24",
-    "HOST_DST_IP_RULE_2":"192.168.50.0/24"
-}
diff --git a/src/test/acl/aclTest.py b/src/test/acl/aclTest.py
deleted file mode 100644
index e25dfd4..0000000
--- a/src/test/acl/aclTest.py
+++ /dev/null
@@ -1,1171 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from scapy.all import *
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnosFlowCtrl import OnosFlowCtrl
-from onosclidriver import OnosCliDriver
-from CordContainer import Container, Onos
-from portmaps import g_subscriber_port_map
-from CordTestServer import cord_test_onos_restart
-from ACL import ACLTest
-from CordTestConfig import setup_module, teardown_module
-import threading
-import time
-import os
-import json
-import pexpect
-log.setLevel('INFO')
-
-class acl_exchange(unittest.TestCase):
-
-    app = ('org.onosproject.acl')
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    ingress_iface = 1
-    egress_iface = 2
-    MAX_PORTS = 100
-    CURRENT_PORT_NUM = egress_iface
-    ACL_SRC_IP = '192.168.20.3/32'
-    ACL_DST_IP = '192.168.30.2/32'
-    ACL_SRC_IP_RULE_2 = '192.168.40.3/32'
-    ACL_DST_IP_RULE_2 = '192.168.50.2/32'
-    ACL_SRC_IP_PREFIX_24 = '192.168.20.3/24'
-    ACL_DST_IP_PREFIX_24 = '192.168.30.2/24'
-    HOST_DST_IP = '192.168.30.0/24'
-    HOST_DST_IP_RULE_2 = '192.168.50.0/24'
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map,_ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        log.info('port_map = %s'%cls.port_map[1] )
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the acl app'''
-
-    def setUp(self):
-        ''' Activate the acl app'''
-        self.maxDiff = None ##for assert_equal compare outputs on failure
-        self.onos_ctrl = OnosCtrl(self.app)
-        status, _ = self.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(3)
-        status, _ = ACLTest.remove_acl_rule()
-        log.info('Start setup')
-        assert_equal(status, True)
-
-    def tearDown(self):
-        '''Deactivate the acl app'''
-        log.info('Tear down setup')
-        self.CURRENT_PORT_NUM = 4
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def acl_hosts_add(cls, dstHostIpMac, egress_iface_count = 1,  egress_iface_num = None):
-        index = 0
-        if egress_iface_num is None:
-            egress_iface_num = cls.egress_iface
-        for ip,_ in dstHostIpMac:
-            egress = cls.port_map[egress_iface_num]
-            log.info('Assigning ip %s to interface %s' %(ip, egress))
-            config_cmds_egress = ( 'ifconfig {} 0'.format(egress),
-                                   'ifconfig {0} up'.format(egress),
-                                   'ifconfig {0} {1}'.format(egress, ip),
-                                   'arping -I {0} {1} -c 2'.format(egress, ip.split('/')[0]),
-                                   'ifconfig {0}'.format(egress),
-                                 )
-            for cmd in config_cmds_egress:
-                os.system(cmd)
-            index += 1
-            if index == egress_iface_count:
-               break
-            egress_iface_count += 1
-            egress_iface_num += 1
-
-
-    @classmethod
-    def acl_hosts_remove(cls, egress_iface_count = 1,  egress_iface_num = None):
-        if egress_iface_num is None:
-           egress_iface_num = cls.egress_iface
-        n = 0
-        for n in range(egress_iface_count):
-           egress = cls.port_map[egress_iface_num]
-           config_cmds_egress = ('ifconfig {} 0'.format(egress))
-           os.system(config_cmds_egress)
-           egress_iface_num += 1
-
-#    @classmethod
-    def acl_rule_traffic_send_recv(self, srcMac, dstMac, srcIp, dstIp, ingress =None, egress=None, ip_proto=None, dstPortNum = None, positive_test = True):
-        if ingress is None:
-           ingress = self.ingress_iface
-        if egress is None:
-           egress = self.egress_iface
-        ingress = self.port_map[ingress]
-        egress = self.port_map[egress]
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dstIp.split('/')[0] and p[IP].src == srcIp.split('/')[0],
-                  prn = recv_cb, iface = egress)
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = srcMac, dst = dstMac)
-        L3 = IP(src = srcIp.split('/')[0], dst = dstIp.split('/')[0])
-        pkt = L2/L3
-        log.info('Sending a packet with dst ip %s, src ip %s , dst mac %s src mac %s on port %s to verify if flows are correct' %
-                 (dstIp.split('/')[0], srcIp.split('/')[0], dstMac, srcMac, ingress))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = ingress)
-        t.join()
-        assert_equal(self.success, True)
-
-    @classmethod
-    def onos_load_config(cls, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_allow_rule(self):
-        acl_rule = ACLTest()
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-	aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_allow_rule_with_24_bit_mask(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_PREFIX_24, dstIp =self.ACL_DST_IP_PREFIX_24, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_deny_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-	aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_deny_rule_with_24_bit_mask(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_PREFIX_24, dstIp =self.ACL_DST_IP_PREFIX_24, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_add_remove_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        status, code = acl_rule.remove_acl_rule(acl_Id[0])
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_add_remove_all_rules(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        status,code = acl_rule.adding_acl_rule('v4', srcIp='10.10.10.10/24', dstIp ='20.20.20.20/24', action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        status, _ = ACLTest.remove_acl_rule()
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_remove_all_rules_without_add(self):
-        status, _ = ACLTest.remove_acl_rule()
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_allow_and_deny_rule_for_same_src_and_dst_ip(self):
-        acl_rule = ACLTest()
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, False)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        status, _ = ACLTest.remove_acl_rule()
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_allow_rules_for_matched_dst_ips(self):
-        acl_rule = ACLTest()
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp ='192.168.30.2/24', action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, False)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        status, _ = ACLTest.remove_acl_rule()
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_with_matching_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-	acl_rule = ACLTest()
-        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-	aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP')
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_with_matching_24bit_mask_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_PREFIX_24, dstIp =self.ACL_DST_IP_PREFIX_24, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP, ingress =ingress, egress = egress, ip_proto = 'UDP')
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_with_non_matching_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-	acl_rule = ACLTest()
-        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-	aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac, srcIp ='192.168.40.1/24', dstIp = self.ACL_DST_IP, ingress=ingress, egress = egress, ip_proto = 'UDP', positive_test = False )
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_deny_rule_with_matching_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_deny_rule_with_src_and_dst_ip_applying_24_bit_mask_for_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_PREFIX_24, dstIp =self.ACL_DST_IP_PREFIX_24, action = 'deny')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_PREFIX_24, dstIp = self.ACL_DST_IP_PREFIX_24,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_deny_rule_with_non_matching_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp ='192.168.40.1/24', dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_allow_and_deny_rules_with_matching_src_and_dst_ip_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        egress = self.CURRENT_PORT_NUM
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress ,ip_proto = 'UDP')
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP_RULE_2)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_RULE_2, dstIp =self.ACL_DST_IP_RULE_2, action = 'deny')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_RULE_2, dstIp = self.ACL_DST_IP_RULE_2,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        ### crossing checking that we should not receive allow acl rule traffic on onther host non matched traffic
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_for_l4_acl_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-	aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_for_remove_l4_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='245', action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        status, code = acl_rule.remove_acl_rule(acl_Id[0])
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_for_remove_l4_rules(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='567', action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='245', action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='ICMP', dstTpPort ='1',action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 3)
-        status, _ = ACLTest.remove_acl_rule()
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    def test_acl_adding_specific_l4_and_all_l4_allow_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-
-    def test_acl_adding_all_l4_and_specific_l4_allow_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='345', action = 'allow')
-        if status is True:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_with_specific_l4_and_all_l4_deny_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-
-    def test_acl_with_all_l4_and_specific_l4_deny_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='345', action = 'deny')
-        if status is True:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_with_specific_l4_deny_and_all_l4_allow_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-
-    def test_acl_deny_all_l4_and_allow_specific_l4_rule(self):
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='345', action = 'allow')
-        if status is True:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-	log.info('Added ACL rules  = %s' %result.json()['aclRules'])
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-
-    def test_acl_tcp_port_allow_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'allow')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 222)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 444, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_udp_port_allow_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='456', action = 'allow')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 456)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 654, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_icmp_port_allow_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='ICMP', dstTpPort ='1', action = 'allow')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'ICMP', dstPortNum = 1)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'ICMP', dstPortNum = 2, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_tcp_port_deny_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'deny')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 222, positive_test = False)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 444, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_udp_port_deny_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='654', action = 'deny')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 654, positive_test = False)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 444, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_icmp_port_deny_rule_for_matching_and_non_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='ICMP', dstTpPort ='1', action = 'deny')
-        time.sleep(20)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL Rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'ICMP', dstPortNum = 1, positive_test = False)
-        ## Non-matching traffic for TCP portocol testing
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'ICMP', dstPortNum = 2, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    def test_acl_two_allow_rules_for_tcp_port_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='TCP', dstTpPort ='222', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        egress = self.CURRENT_PORT_NUM
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 222)
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP_RULE_2)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_RULE_2, dstIp =self.ACL_DST_IP_RULE_2, ipProto ='TCP', dstTpPort ='345', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_RULE_2, dstIp = self.ACL_DST_IP_RULE_2,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 345)
-        ### crossing checking that we should not receive allow acl rule traffic on onther host non matched traffic
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 222, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 2,  egress_iface_num = egress-1)
-
-    def test_acl_two_allow_rules_for_udp_ports_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, ipProto ='UDP', dstTpPort ='987', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        egress = self.CURRENT_PORT_NUM
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 987)
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP_RULE_2)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_RULE_2, dstIp =self.ACL_DST_IP_RULE_2, ipProto ='TCP', dstTpPort ='345', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_RULE_2, dstIp = self.ACL_DST_IP_RULE_2,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 345)
-        ### crossing checking that we should not receive allow acl rule traffic on onther host non matched traffic
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 987, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 2,  egress_iface_num = egress-1)
-
-    def test_acl_two_allow_rules_for_src_ips_dst_ips_and_l4_ports_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        egress = self.CURRENT_PORT_NUM
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP')
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP_RULE_2)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_RULE_2, dstIp =self.ACL_DST_IP_RULE_2, ipProto ='TCP', dstTpPort ='345', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_RULE_2, dstIp = self.ACL_DST_IP_RULE_2,ingress =ingress, egress = egress, ip_proto = 'TCP', dstPortNum = 345)
-        ### crossing checking that we should not receive allow acl rule traffic on onther host non matched traffic
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 987, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 2,  egress_iface_num = egress-1)
-
-    def test_acl_allow_and_deny_rules_for_src_ips_dst_ips_and_l4_ports_matching_traffic(self):
-        ingress = self.ingress_iface
-        egress = self.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP, dstIp =self.ACL_DST_IP, action = 'deny')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 1)
-        log.info('Added ACL rules = %s' %result.json()['aclRules'])
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log.info('Discovered hosts: %s' %hosts)
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        #log.info('Flows: %s' %flows)
-        assert_not_equal(len(flows), 0)
-        egress = self.CURRENT_PORT_NUM
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP_RULE_2)
-        self.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        dstMac = host_ip_mac[0][1]
-        self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.ACL_SRC_IP_RULE_2, dstIp =self.ACL_DST_IP_RULE_2, ipProto ='UDP', dstTpPort ='345', action = 'allow')
-        time.sleep(10)
-        if status is False:
-            log.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules()
-        aclRules1 = result.json()['aclRules']
-        acl_Id = map(lambda d: d['id'], aclRules1)
-        assert_equal(len(acl_Id), 2)
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP_RULE_2, dstIp = self.ACL_DST_IP_RULE_2,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 345)
-        ### crossing checking that we should not receive allow acl rule traffic on onther host non matched traffic
-        self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 987, positive_test = False)
-        self.cliExit()
-        self.acl_hosts_remove(egress_iface_count = 2,  egress_iface_num = egress-1)
diff --git a/src/test/apps/aaa-1.0-SNAPSHOT.oar b/src/test/apps/aaa-1.0-SNAPSHOT.oar
deleted file mode 100644
index f5a462c..0000000
--- a/src/test/apps/aaa-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/aaa-1.1-SNAPSHOT.oar b/src/test/apps/aaa-1.1-SNAPSHOT.oar
deleted file mode 100644
index 46add8a..0000000
--- a/src/test/apps/aaa-1.1-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/aaa-1.2-SNAPSHOT.oar b/src/test/apps/aaa-1.2-SNAPSHOT.oar
deleted file mode 100644
index 2d0c9dc..0000000
--- a/src/test/apps/aaa-1.2-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/aaa-2.0-SNAPSHOT.oar b/src/test/apps/aaa-2.0-SNAPSHOT.oar
deleted file mode 100644
index 20e88ca..0000000
--- a/src/test/apps/aaa-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/aaa-3.0-SNAPSHOT.oar b/src/test/apps/aaa-3.0-SNAPSHOT.oar
deleted file mode 100644
index 631aff9..0000000
--- a/src/test/apps/aaa-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar
deleted file mode 100644
index 31b83b4..0000000
--- a/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-2.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-2.0-SNAPSHOT.oar
deleted file mode 100644
index b75faab..0000000
--- a/src/test/apps/ciena-cordigmp-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-3.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-3.0-SNAPSHOT.oar
deleted file mode 100644
index 462df58..0000000
--- a/src/test/apps/ciena-cordigmp-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-cbench-1.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-cbench-1.0-SNAPSHOT.oar
deleted file mode 100644
index b2b69e4..0000000
--- a/src/test/apps/ciena-cordigmp-cbench-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-multitable-1.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-multitable-1.0-SNAPSHOT.oar
deleted file mode 100644
index 5d5c39b..0000000
--- a/src/test/apps/ciena-cordigmp-multitable-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar
deleted file mode 100644
index d22de1b..0000000
--- a/src/test/apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
deleted file mode 100644
index e344d26..0000000
--- a/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp-onos-1.5.oar b/src/test/apps/ciena-cordigmp-onos-1.5.oar
deleted file mode 100644
index 7f1a19d..0000000
--- a/src/test/apps/ciena-cordigmp-onos-1.5.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp.multi-table/pom.xml b/src/test/apps/ciena-cordigmp.multi-table/pom.xml
deleted file mode 100644
index 192becb..0000000
--- a/src/test/apps/ciena-cordigmp.multi-table/pom.xml
+++ /dev/null
@@ -1,186 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>

-<!--

-  ~ Copyright 2016 Open Networking Foundation
-  ~

-  ~ Licensed under the Apache License, Version 2.0 (the "License");

-  ~ you may not use this file except in compliance with the License.

-  ~ You may obtain a copy of the License at

-  ~

-  ~     http://www.apache.org/licenses/LICENSE-2.0

-  ~

-  ~ Unless required by applicable law or agreed to in writing, software

-  ~ distributed under the License is distributed on an "AS IS" BASIS,

-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

-  ~ See the License for the specific language governing permissions and

-  ~ limitations under the License.

-  -->

-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

-    <modelVersion>4.0.0</modelVersion>

-

-        <parent>

-        <groupId>org.onosproject</groupId>

-        <artifactId>onos-dependencies</artifactId>

-        <version>1.10.0-rc4</version>

-        <relativePath></relativePath>

-    </parent>

-

-    <groupId>org.ciena.cordigmp</groupId>

-    <artifactId>ciena-cordigmp</artifactId>

-    <version>3.0-SNAPSHOT</version>

-    <packaging>bundle</packaging>

-

-          <repositories>

-        <repository>

-          <id>oss-staging</id>

-          <name>OSS Staging</name>

-          <url>https://oss.sonatype.org/content/groups/staging</url>

-        </repository>

-      </repositories>

-

-    <description>Ciena CORD IGMP for OVS</description>

-    <url>http://onosproject.org</url>

-

-    <properties>

-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

-        <onos.version>1.10.0-rc4</onos.version>

-        <onos.app.name>org.ciena.cordigmp</onos.app.name>

-        <onos.app.title>Ciena IGMP for OVS</onos.app.title>

-        <onos.app.origin>Ciena Inc.</onos.app.origin>

-        <onos.app.category>default</onos.app.category>

-        <onos.app.url>http://onosproject.org</onos.app.url>

-        <onos.app.readme>ONOS OSGi bundle archetype.</onos.app.readme>

-        <onos.app.requires>org.opencord.config</onos.app.requires>

-    </properties>

-

-    <dependencies>

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-api</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.opencord</groupId>

-            <artifactId>cord-config</artifactId>

-            <version>1.0-SNAPSHOT</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onlab-osgi</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>junit</groupId>

-            <artifactId>junit</artifactId>

-            <version>4.12</version>

-            <scope>test</scope>

-        </dependency>

-

-        <dependency>

-            <groupId>org.apache.felix</groupId>

-            <artifactId>org.apache.felix.scr.annotations</artifactId>

-            <version>1.9.12</version>

-            <scope>provided</scope>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-cli</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-      <dependency>

-            <groupId>org.osgi</groupId>

-            <artifactId>org.osgi.compendium</artifactId>

-            <version>5.0.0</version>

-        </dependency>

-

-              <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onlab-misc</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-incubator-api</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-core-common</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-    </dependencies>

-

-    <build>

-        <plugins>

-            <plugin>

-                <groupId>org.apache.felix</groupId>

-                <artifactId>maven-bundle-plugin</artifactId>

-                <version>3.0.1</version>

-                <extensions>true</extensions>

-            </plugin>

-            <plugin>

-                <groupId>org.apache.maven.plugins</groupId>

-                <artifactId>maven-compiler-plugin</artifactId>

-                <version>2.5.1</version>

-                <configuration>

-                    <source>1.8</source>

-                    <target>1.8</target>

-                </configuration>

-            </plugin>

-            <plugin>

-                <groupId>org.apache.felix</groupId>

-                <artifactId>maven-scr-plugin</artifactId>

-                <version>1.21.0</version>

-                <executions>

-                    <execution>

-                        <id>generate-scr-srcdescriptor</id>

-                        <goals>

-                            <goal>scr</goal>

-                        </goals>

-                    </execution>

-                </executions>

-                <configuration>

-                    <supportedProjectTypes>

-                        <supportedProjectType>bundle</supportedProjectType>

-                        <supportedProjectType>war</supportedProjectType>

-                    </supportedProjectTypes>

-                </configuration>

-            </plugin>

-            <plugin>

-                <groupId>org.onosproject</groupId>

-                <artifactId>onos-maven-plugin</artifactId>

-                <version>1.9</version>

-                <executions>

-                    <execution>

-                        <id>cfg</id>

-                        <phase>generate-resources</phase>

-                        <goals>

-                            <goal>cfg</goal>

-                        </goals>

-                    </execution>

-                    <execution>

-                        <id>swagger</id>

-                        <phase>generate-sources</phase>

-                        <goals>

-                            <goal>swagger</goal>

-                        </goals>

-                    </execution>

-                    <execution>

-                        <id>app</id>

-                        <phase>package</phase>

-                        <goals>

-                            <goal>app</goal>

-                        </goals>

-                    </execution>

-                </executions>

-            </plugin>

-        </plugins>

-    </build>

-

-</project>

diff --git a/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/CordIgmp.java b/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/CordIgmp.java
deleted file mode 100644
index d6e97ea..0000000
--- a/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/CordIgmp.java
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright 2016-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.cordigmp;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Modified;
-import org.apache.felix.scr.annotations.Property;
-import org.apache.felix.scr.annotations.Reference;
-import org.apache.felix.scr.annotations.ReferenceCardinality;
-import org.onlab.packet.Ethernet;
-import org.onlab.packet.IpAddress;
-import org.onlab.packet.VlanId;
-import org.onosproject.cfg.ComponentConfigService;
-import org.opencord.cordconfig.access.AccessDeviceConfig;
-import org.opencord.cordconfig.access.AccessDeviceData;
-import org.onosproject.core.ApplicationId;
-import org.onosproject.core.CoreService;
-import org.onosproject.net.ConnectPoint;
-import org.onosproject.net.DeviceId;
-import org.onosproject.net.config.ConfigFactory;
-import org.onosproject.net.config.NetworkConfigEvent;
-import org.onosproject.net.config.NetworkConfigListener;
-import org.onosproject.net.config.NetworkConfigRegistry;
-import org.onosproject.net.config.basics.SubjectFactories;
-import org.onosproject.net.flow.DefaultTrafficSelector;
-import org.onosproject.net.flow.DefaultTrafficTreatment;
-import org.onosproject.net.flow.TrafficSelector;
-import org.onosproject.net.flowobjective.DefaultForwardingObjective;
-import org.onosproject.net.flowobjective.DefaultNextObjective;
-import org.onosproject.net.flowobjective.FlowObjectiveService;
-import org.onosproject.net.flowobjective.ForwardingObjective;
-import org.onosproject.net.flowobjective.NextObjective;
-import org.onosproject.net.flowobjective.Objective;
-import org.onosproject.net.flowobjective.ObjectiveContext;
-import org.onosproject.net.flowobjective.ObjectiveError;
-import org.onosproject.net.mcast.McastEvent;
-import org.onosproject.net.mcast.McastListener;
-import org.onosproject.net.mcast.McastRoute;
-import org.onosproject.net.mcast.McastRouteInfo;
-import org.onosproject.net.mcast.MulticastRouteService;
-
-import org.osgi.service.component.ComponentContext;
-import org.slf4j.Logger;
-
-import java.util.Dictionary;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Strings.isNullOrEmpty;
-import static org.onlab.util.Tools.get;
-import static org.slf4j.LoggerFactory.getLogger;
-
-/**
- * CORD multicast provisioning application. Operates by listening to
- * events on the multicast rib and provisioning groups to program multicast
- * flows on the dataplane.
- */
-@Component(immediate = true)
-public class CordIgmp {
-
-
-    private static final int DEFAULT_PRIORITY = 500;
-    private static final short DEFAULT_MCAST_VLAN = 4000;
-    private static final boolean DEFAULT_VLAN_ENABLED = false;
-
-    private final Logger log = getLogger(getClass());
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected MulticastRouteService mcastService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected FlowObjectiveService flowObjectiveService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected CoreService coreService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected ComponentConfigService componentConfigService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected NetworkConfigRegistry networkConfig;
-
-    protected McastListener listener = new InternalMulticastListener();
-    private InternalNetworkConfigListener configListener =
-            new InternalNetworkConfigListener();
-
-    //TODO: move this to a ec map
-    private Map<IpAddress, Integer> groups = Maps.newConcurrentMap();
-
-    private ApplicationId appId;
-
-    @Property(name = "mcastVlan", intValue = DEFAULT_MCAST_VLAN,
-            label = "VLAN for multicast traffic")
-    private int mcastVlan = DEFAULT_MCAST_VLAN;
-
-    @Property(name = "vlanEnabled", boolValue = DEFAULT_VLAN_ENABLED,
-            label = "Use vlan for multicast traffic?")
-    private boolean vlanEnabled = DEFAULT_VLAN_ENABLED;
-
-    @Property(name = "priority", intValue = DEFAULT_PRIORITY,
-            label = "Priority for multicast rules")
-    private int priority = DEFAULT_PRIORITY;
-
-    private Map<DeviceId, AccessDeviceData> oltData = new ConcurrentHashMap<>();
-
-    private static final Class<AccessDeviceConfig> CONFIG_CLASS =
-            AccessDeviceConfig.class;
-
-    private ConfigFactory<DeviceId, AccessDeviceConfig> configFactory =
-            new ConfigFactory<DeviceId, AccessDeviceConfig>(
-                    SubjectFactories.DEVICE_SUBJECT_FACTORY, CONFIG_CLASS, "accessDevice") {
-                @Override
-                public AccessDeviceConfig createConfig() {
-                    return new AccessDeviceConfig();
-                }
-            };
-
-    @Activate
-    public void activate(ComponentContext context) {
-        componentConfigService.registerProperties(getClass());
-        modified(context);
-
-        appId = coreService.registerApplication("org.ciena.cordigmp");
-
-
-        networkConfig.registerConfigFactory(configFactory);
-        networkConfig.addListener(configListener);
-
-        networkConfig.getSubjects(DeviceId.class, AccessDeviceConfig.class).forEach(
-                subject -> {
-                    AccessDeviceConfig config = networkConfig.getConfig(subject, AccessDeviceConfig.class);
-                    if (config != null) {
-                        AccessDeviceData data = config.getOlt();
-                        oltData.put(data.deviceId(), data);
-                    }
-                }
-        );
-
-
-        mcastService.addListener(listener);
-
-        mcastService.getRoutes().stream()
-                .map(r -> new ImmutablePair<>(r, mcastService.fetchSinks(r)))
-                .filter(pair -> pair.getRight() != null && !pair.getRight().isEmpty())
-                .forEach(pair -> pair.getRight().forEach(sink -> provisionGroup(pair.getLeft(),
-                        sink)));
-
-        log.info("Started");
-    }
-
-    @Deactivate
-    public void deactivate() {
-        componentConfigService.unregisterProperties(getClass(), false);
-        mcastService.removeListener(listener);
-        networkConfig.unregisterConfigFactory(configFactory);
-        networkConfig.removeListener(configListener);
-        log.info("Stopped");
-    }
-
-    @Modified
-    public void modified(ComponentContext context) {
-        Dictionary<?, ?> properties = context != null ? context.getProperties() : new Properties();
-
-        try {
-
-            String s = get(properties, "mcastVlan");
-            mcastVlan = isNullOrEmpty(s) ? DEFAULT_MCAST_VLAN : Short.parseShort(s.trim());
-
-            s = get(properties, "vlanEnabled");
-            vlanEnabled = isNullOrEmpty(s) ? DEFAULT_VLAN_ENABLED : Boolean.parseBoolean(s.trim());
-
-            s = get(properties, "priority");
-            priority = isNullOrEmpty(s) ? DEFAULT_PRIORITY : Integer.parseInt(s.trim());
-
-        } catch (Exception e) {
-            mcastVlan = DEFAULT_MCAST_VLAN;
-            vlanEnabled = false;
-            priority = DEFAULT_PRIORITY;
-        }
-    }
-
-    private class InternalMulticastListener implements McastListener {
-        @Override
-        public void event(McastEvent event) {
-            McastRouteInfo info = event.subject();
-            switch (event.type()) {
-                case ROUTE_ADDED:
-                    break;
-                case ROUTE_REMOVED:
-                    break;
-                case SOURCE_ADDED:
-                    break;
-                case SINK_ADDED:
-                    if (!info.sink().isPresent()) {
-                        log.warn("No sink given after sink added event: {}", info);
-                        return;
-                    }
-                    provisionGroup(info.route(), info.sink().get());
-                    break;
-                case SINK_REMOVED:
-                    unprovisionGroup(event.subject());
-                    break;
-                default:
-                    log.warn("Unknown mcast event {}", event.type());
-            }
-        }
-    }
-
-    private void unprovisionGroup(McastRouteInfo info) {
-
-        if (!info.sink().isPresent()) {
-            log.warn("No sink given after sink removed event: {}", info);
-            return;
-        }
-        ConnectPoint loc = info.sink().get();
-        log.info("Removing flow for subscriber port: {}, group {}",
-                loc.port(), info.route().group());
-        NextObjective next = DefaultNextObjective.builder()
-                .fromApp(appId)
-                .addTreatment(DefaultTrafficTreatment.builder().setOutput(loc.port()).build())
-                .withType(NextObjective.Type.BROADCAST)
-                .withId(groups.get(info.route().group()))
-                .removeFromExisting(new ObjectiveContext() {
-                    @Override
-                    public void onSuccess(Objective objective) {
-                        //TODO: change to debug
-                        log.info("Next Objective {} removed", objective.id());
-                    }
-
-                    @Override
-                    public void onError(Objective objective, ObjectiveError error) {
-                        //TODO: change to debug
-                        log.info("Next Objective {} failed, because {}",
-                                objective.id(),
-                                error);
-                    }
-                });
-
-        flowObjectiveService.next(loc.deviceId(), next);
-    }
-
-    private void provisionGroup(McastRoute route, ConnectPoint sink) {
-        checkNotNull(route, "Route cannot be null");
-        checkNotNull(sink, "Sink cannot be null");
-
-        AccessDeviceData oltInfo = oltData.get(sink.deviceId());
-
-        if (oltInfo == null) {
-            log.warn("Unknown OLT device : {}", sink.deviceId());
-            return;
-        }
-
-        final AtomicBoolean sync = new AtomicBoolean(false);
-
-        log.info("Provisioning sink for device {}", sink.deviceId());
-
-        Integer nextId = groups.computeIfAbsent(route.group(), (g) -> {
-            Integer id = flowObjectiveService.allocateNextId();
-            NextObjective next = DefaultNextObjective.builder()
-                    .fromApp(appId)
-                    .addTreatment(DefaultTrafficTreatment.builder().setOutput(sink.port()).build())
-                    .withType(NextObjective.Type.BROADCAST)
-                    .withId(id)
-                    .add(new ObjectiveContext() {
-                        @Override
-                        public void onSuccess(Objective objective) {
-                            //TODO: change to debug
-                            log.info("Next Objective {} installed", objective.id());
-                        }
-
-                        @Override
-                        public void onError(Objective objective, ObjectiveError error) {
-                            //TODO: change to debug
-                            log.info("Next Objective {} failed to add, because {}",
-                                    objective.id(),
-                                    error);
-                        }
-                    });
-
-            flowObjectiveService.next(sink.deviceId(), next);
-
-            TrafficSelector.Builder mcast = DefaultTrafficSelector.builder()
-                .matchInPort(oltInfo.uplink())
-                .matchEthType(Ethernet.TYPE_IPV4)
-                .matchIPDst(g.toIpPrefix());
-
-            if (vlanEnabled) {
-                mcast.matchVlanId(VlanId.vlanId((short) mcastVlan));
-            }
-
-            ForwardingObjective fwd = DefaultForwardingObjective.builder()
-                    .fromApp(appId)
-                    .nextStep(id)
-                    .makePermanent()
-                    .withFlag(ForwardingObjective.Flag.VERSATILE)
-                    .withPriority(priority)
-                    .withSelector(mcast.build())
-                    .add(new ObjectiveContext() {
-                        @Override
-                        public void onSuccess(Objective objective) {
-                            //TODO: change to debug
-                            log.info("Forwarding objective installed {}", objective);
-                        }
-
-                        @Override
-                        public void onError(Objective objective, ObjectiveError error) {
-                            //TODO: change to debug
-                            log.info("Forwarding objective failed {}", objective);
-                        }
-                    });
-
-            flowObjectiveService.forward(sink.deviceId(), fwd);
-
-            sync.set(true);
-            log.info("Installed flows for device: {}, id {}, ip {}, port {}",
-                    sink.deviceId(), id, g.toIpPrefix(), sink.port());
-            return id;
-        });
-
-        if (!sync.get()) {
-            NextObjective next = DefaultNextObjective.builder()
-                    .fromApp(appId)
-                    .addTreatment(DefaultTrafficTreatment.builder().setOutput(sink.port()).build())
-                    .withType(NextObjective.Type.BROADCAST)
-                    .withId(nextId)
-                    .addToExisting(new ObjectiveContext() {
-                        @Override
-                        public void onSuccess(Objective objective) {
-                            //TODO: change to debug
-                            log.info("Next Objective {} installed to existing", objective.id());
-                        }
-
-                        @Override
-                        public void onError(Objective objective, ObjectiveError error) {
-                            //TODO: change to debug
-                            log.info("Next Objective {} failed to install to existing, because {}",
-                                    objective.id(),
-                                    error);
-                        }
-                    });
-
-            flowObjectiveService.next(sink.deviceId(), next);
-
-            log.info("Append flows for device {}, id {}, ip {}, port {}", sink.deviceId(), nextId,
-                    route.group().toIpPrefix(), sink.port());
-        }
-
-    }
-
-    private class InternalNetworkConfigListener implements NetworkConfigListener {
-        @Override
-        public void event(NetworkConfigEvent event) {
-            switch (event.type()) {
-
-                case CONFIG_ADDED:
-                case CONFIG_UPDATED:
-                    AccessDeviceConfig config =
-                            networkConfig.getConfig((DeviceId) event.subject(), CONFIG_CLASS);
-                    if (config != null) {
-                        oltData.put(config.getOlt().deviceId(), config.getOlt());
-                    }
-
-                    break;
-                case CONFIG_REGISTERED:
-                case CONFIG_UNREGISTERED:
-                    break;
-                case CONFIG_REMOVED:
-                    oltData.remove(event.subject());
-                    break;
-                default:
-                    break;
-            }
-        }
-
-        @Override
-        public boolean isRelevant(NetworkConfigEvent event) {
-            return event.configClass().equals(CONFIG_CLASS);
-        }
-    }
-}
diff --git a/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/package-info.java b/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/package-info.java
deleted file mode 100644
index 7214bf7..0000000
--- a/src/test/apps/ciena-cordigmp.multi-table/src/main/java/org/ciena/cordigmp/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2015-2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Ciena application for cord tester to provision flows with ovs.
- * This is required as OVS onos driver does not support multi-table inserts.
- * This application takes a port pair configuration per group to provision flows.
- * To be used in simulation environments with subscriber tests.
- * On the target, cordmcast app should be used.
- */
-package org.ciena.cordigmp;
diff --git a/src/test/apps/ciena-cordigmp/pom.xml b/src/test/apps/ciena-cordigmp/pom.xml
deleted file mode 100644
index 6365894..0000000
--- a/src/test/apps/ciena-cordigmp/pom.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>

-<!--

-  ~ Copyright 2016 Open Networking Foundation
-  ~

-  ~ Licensed under the Apache License, Version 2.0 (the "License");

-  ~ you may not use this file except in compliance with the License.

-  ~ You may obtain a copy of the License at

-  ~

-  ~     http://www.apache.org/licenses/LICENSE-2.0

-  ~

-  ~ Unless required by applicable law or agreed to in writing, software

-  ~ distributed under the License is distributed on an "AS IS" BASIS,

-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

-  ~ See the License for the specific language governing permissions and

-  ~ limitations under the License.

-  -->

-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

-    <modelVersion>4.0.0</modelVersion>

-

-   <parent>

-        <groupId>org.onosproject</groupId>

-        <artifactId>onos-dependencies</artifactId>

-        <version>1.10.0-rc4</version>

-        <relativePath></relativePath>

-    </parent>

-

-    <groupId>org.ciena.cordigmp</groupId>

-    <artifactId>ciena-cordigmp</artifactId>

-    <version>3.0-SNAPSHOT</version>

-    <packaging>bundle</packaging>

-

-          <repositories>

-        <repository>

-          <id>oss-staging</id>

-          <name>OSS Staging</name>

-          <url>https://oss.sonatype.org/content/groups/staging</url>

-        </repository>

-      </repositories>

-

-    <description>Ciena CORD IGMP for OVS</description>

-    <url>http://onosproject.org</url>

-

-    <properties>

-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

-        <onos.version>1.10.0-rc4</onos.version>

-        <onos.app.name>org.ciena.cordigmp</onos.app.name>

-        <onos.app.title>Ciena IGMP for OVS</onos.app.title>

-        <onos.app.origin>Ciena Inc.</onos.app.origin>

-        <onos.app.category>default</onos.app.category>

-        <onos.app.url>http://onosproject.org</onos.app.url>

-        <onos.app.readme>ONOS OSGi bundle archetype.</onos.app.readme>

-        <onos.app.requires>org.opencord.config</onos.app.requires>

-    </properties>

-

-

-    <dependencies>

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-api</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onlab-osgi</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.opencord</groupId>

-            <artifactId>cord-config</artifactId>

-            <version>1.0-SNAPSHOT</version>

-        </dependency>

-

-        <dependency>

-            <groupId>junit</groupId>

-            <artifactId>junit</artifactId>

-            <version>4.12</version>

-            <scope>test</scope>

-        </dependency>

-

-        <dependency>

-            <groupId>org.apache.felix</groupId>

-            <artifactId>org.apache.felix.scr.annotations</artifactId>

-            <version>1.9.12</version>

-            <scope>provided</scope>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-cli</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-      <dependency>

-            <groupId>org.osgi</groupId>

-            <artifactId>org.osgi.compendium</artifactId>

-            <version>5.0.0</version>

-      </dependency>

-

-              <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onlab-misc</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-incubator-api</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-        <dependency>

-            <groupId>org.onosproject</groupId>

-            <artifactId>onos-core-common</artifactId>

-            <version>${onos.version}</version>

-        </dependency>

-

-    </dependencies>

-

-    <build>

-        <plugins>

-            <plugin>

-                <groupId>org.apache.felix</groupId>

-                <artifactId>maven-bundle-plugin</artifactId>

-                <version>3.0.1</version>

-                <extensions>true</extensions>

-            </plugin>

-            <plugin>

-                <groupId>org.apache.maven.plugins</groupId>

-                <artifactId>maven-compiler-plugin</artifactId>

-                <version>2.5.1</version>

-                <configuration>

-                    <source>1.8</source>

-                    <target>1.8</target>

-                </configuration>

-            </plugin>

-            <plugin>

-                <groupId>org.apache.felix</groupId>

-                <artifactId>maven-scr-plugin</artifactId>

-                <version>1.21.0</version>

-                <executions>

-                    <execution>

-                        <id>generate-scr-srcdescriptor</id>

-                        <goals>

-                            <goal>scr</goal>

-                        </goals>

-                    </execution>

-                </executions>

-                <configuration>

-                    <supportedProjectTypes>

-                        <supportedProjectType>bundle</supportedProjectType>

-                        <supportedProjectType>war</supportedProjectType>

-                    </supportedProjectTypes>

-                </configuration>

-            </plugin>

-            <plugin>

-                <groupId>org.onosproject</groupId>

-                <artifactId>onos-maven-plugin</artifactId>

-                <version>1.9</version>

-                <executions>

-                    <execution>

-                        <id>cfg</id>

-                        <phase>generate-resources</phase>

-                        <goals>

-                            <goal>cfg</goal>

-                        </goals>

-                    </execution>

-                    <execution>

-                        <id>swagger</id>

-                        <phase>generate-sources</phase>

-                        <goals>

-                            <goal>swagger</goal>

-                        </goals>

-                    </execution>

-                    <execution>

-                        <id>app</id>

-                        <phase>package</phase>

-                        <goals>

-                            <goal>app</goal>

-                        </goals>

-                    </execution>

-                </executions>

-            </plugin>

-        </plugins>

-    </build>

-

-</project>

diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java
deleted file mode 100644
index 75bb2f6..0000000
--- a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright 2015-2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.cordigmp;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multiset;
-import com.google.common.collect.ConcurrentHashMultiset;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Modified;
-import org.apache.felix.scr.annotations.Property;
-import org.apache.felix.scr.annotations.Reference;
-import org.apache.felix.scr.annotations.ReferenceCardinality;
-import org.onlab.packet.Ethernet;
-import org.onlab.packet.IpAddress;
-import org.onosproject.cfg.ComponentConfigService;
-import org.onosproject.core.ApplicationId;
-import org.onosproject.core.CoreService;
-import org.onosproject.net.ConnectPoint;
-import org.onosproject.net.DeviceId;
-import org.onosproject.net.config.ConfigFactory;
-import org.onosproject.net.config.NetworkConfigEvent;
-import org.onosproject.net.config.NetworkConfigListener;
-import org.onosproject.net.config.NetworkConfigRegistry;
-import org.onosproject.net.config.basics.SubjectFactories;
-import org.onosproject.net.flow.DefaultTrafficSelector;
-import org.onosproject.net.flow.DefaultTrafficTreatment;
-import org.onosproject.net.flow.TrafficTreatment;
-import org.onosproject.net.flow.TrafficSelector;
-import org.onosproject.net.device.DeviceEvent;
-import org.onosproject.net.device.DeviceListener;
-import org.onosproject.net.device.DeviceService;
-import org.onosproject.net.flow.instructions.Instructions;
-import org.onosproject.net.flow.FlowEntry;
-import org.onosproject.net.flow.DefaultFlowEntry;
-import org.onosproject.net.flow.FlowRuleService;
-import org.onosproject.net.flowobjective.FlowObjectiveService;
-import org.onosproject.net.mcast.McastEvent;
-import org.onosproject.net.mcast.McastListener;
-import org.onosproject.net.mcast.McastRoute;
-import org.onosproject.net.mcast.McastRouteInfo;
-import org.onosproject.net.mcast.MulticastRouteService;
-import org.opencord.cordconfig.access.AccessDeviceConfig;
-import org.opencord.cordconfig.access.AccessDeviceData;
-import org.osgi.service.component.ComponentContext;
-import org.onosproject.net.PortNumber;
-import org.onlab.packet.IPv4;
-import org.slf4j.Logger;
-
-import java.util.Dictionary;
-import java.util.Map;
-import java.util.Collection;
-import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Strings.isNullOrEmpty;
-import static org.onlab.util.Tools.get;
-import static org.slf4j.LoggerFactory.getLogger;
-
-/**
- * CORD multicast provisioning application. Operates by listening to
- * events on the multicast rib and provisioning groups to program multicast
- * flows on the dataplane.
- */
-@Component(immediate = true)
-public class CordIgmp {
-
-
-    private static final int DEFAULT_PRIORITY = 500;
-    private static final short DEFAULT_MCAST_VLAN = 4000;
-    private static final boolean DEFAULT_VLAN_ENABLED = false;
-    private static final short DEFAULT_INPUT_PORT = 2;
-    private static final short DEFAULT_OUTPUT_PORT = 1;
-    private final Logger log = getLogger(getClass());
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected MulticastRouteService mcastService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected FlowObjectiveService flowObjectiveService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected CoreService coreService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected ComponentConfigService componentConfigService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected NetworkConfigRegistry networkConfig;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected FlowRuleService flowRuleService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected DeviceService deviceService;
-
-    protected McastListener listener = new InternalMulticastListener();
-    private InternalNetworkConfigListener configListener =
-            new InternalNetworkConfigListener();
-    private DeviceListener deviceListener = new InternalDeviceListener();
-
-    //Map of IGMP groups to port
-    private Map<IpAddress, IgmpPortPair> cordIgmpTranslateTable = Maps.newConcurrentMap();
-
-    //Count of group joins
-    private Multiset<IpAddress> cordIgmpCountTable = ConcurrentHashMultiset.create();
-
-    //TODO: move this to distributed atomic long
-    private AtomicInteger channels = new AtomicInteger(0);
-
-    private ApplicationId appId;
-
-    @Property(name = "mcastVlan", intValue = DEFAULT_MCAST_VLAN,
-            label = "VLAN for multicast traffic")
-    private int mcastVlan = DEFAULT_MCAST_VLAN;
-
-    @Property(name = "vlanEnabled", boolValue = DEFAULT_VLAN_ENABLED,
-            label = "Use vlan for multicast traffic?")
-    private boolean vlanEnabled = DEFAULT_VLAN_ENABLED;
-
-    @Property(name = "priority", intValue = DEFAULT_PRIORITY,
-            label = "Priority for multicast rules")
-    private int priority = DEFAULT_PRIORITY;
-
-    @Property(name = "inputPort", intValue = DEFAULT_INPUT_PORT,
-              label = "Input port for OVS multicast traffic")
-    private int inputPort = DEFAULT_INPUT_PORT;
-
-    @Property(name = "outputPort", intValue = DEFAULT_OUTPUT_PORT,
-              label = "Output port for OVS multicast traffic")
-    private int outputPort = DEFAULT_OUTPUT_PORT;
-
-    private Map<DeviceId, AccessDeviceData> oltData = new ConcurrentHashMap<>();
-
-    private Map<DeviceId, Boolean> deviceAvailability = new ConcurrentHashMap<>();
-
-    private static final Class<CordIgmpTranslateConfig> CORD_IGMP_TRANSLATE_CONFIG_CLASS =
-            CordIgmpTranslateConfig.class;
-
-    private ConfigFactory<ApplicationId, CordIgmpTranslateConfig> cordIgmpTranslateConfigFactory =
-            new ConfigFactory<ApplicationId, CordIgmpTranslateConfig>(
-                    SubjectFactories.APP_SUBJECT_FACTORY, CORD_IGMP_TRANSLATE_CONFIG_CLASS, "cordIgmpTranslate", true) {
-                @Override
-                public CordIgmpTranslateConfig createConfig() {
-                    return new CordIgmpTranslateConfig();
-                }
-            };
-
-
-    @Activate
-    public void activate(ComponentContext context) {
-        componentConfigService.registerProperties(getClass());
-        modified(context);
-
-        appId = coreService.registerApplication("org.ciena.cordigmp");
-
-        networkConfig.registerConfigFactory(cordIgmpTranslateConfigFactory);
-        networkConfig.addListener(configListener);
-
-        networkConfig.getSubjects(DeviceId.class, AccessDeviceConfig.class).forEach(
-                subject -> {
-                    AccessDeviceConfig config = networkConfig.getConfig(subject, AccessDeviceConfig.class);
-                    if (config != null) {
-                        AccessDeviceData data = config.getOlt();
-                        oltData.put(data.deviceId(), data);
-                    }
-                }
-        );
-
-        CordIgmpTranslateConfig cordIgmpTranslateConfig = networkConfig.getConfig(appId, CordIgmpTranslateConfig.class);
-
-        if (cordIgmpTranslateConfig != null) {
-            Collection<McastPorts> translations = cordIgmpTranslateConfig.getCordIgmpTranslations();
-            for (McastPorts port: translations) {
-                cordIgmpTranslateTable.put(port.group(),
-                                           port.portPair());
-            }
-        }
-
-        mcastService.addListener(listener);
-
-        mcastService.getRoutes().stream()
-                .map(r -> new ImmutablePair<>(r, mcastService.fetchSinks(r)))
-                .filter(pair -> pair.getRight() != null && !pair.getRight().isEmpty())
-                .forEach(pair -> pair.getRight().forEach(sink -> provisionGroup(pair.getLeft(),
-                                                                                sink)));
-
-        deviceService.addListener(deviceListener);
-
-        log.info("Started");
-    }
-
-    @Deactivate
-    public void deactivate() {
-        componentConfigService.unregisterProperties(getClass(), false);
-        deviceService.removeListener(deviceListener);
-        mcastService.removeListener(listener);
-        networkConfig.unregisterConfigFactory(cordIgmpTranslateConfigFactory);
-        networkConfig.removeListener(configListener);
-        deviceAvailability.clear();
-        log.info("Stopped");
-    }
-
-    @Modified
-    public void modified(ComponentContext context) {
-        Dictionary<?, ?> properties = context != null ? context.getProperties() : new Properties();
-
-        try {
-            String s = get(properties, "mcastVlan");
-            mcastVlan = isNullOrEmpty(s) ? DEFAULT_MCAST_VLAN : Short.parseShort(s.trim());
-
-            s = get(properties, "vlanEnabled");
-            vlanEnabled = isNullOrEmpty(s) ? DEFAULT_VLAN_ENABLED : Boolean.parseBoolean(s.trim());
-
-            s = get(properties, "priority");
-            priority = isNullOrEmpty(s) ? DEFAULT_PRIORITY : Integer.parseInt(s.trim());
-
-            s = get(properties, "inputPort");
-            inputPort = isNullOrEmpty(s) ? DEFAULT_INPUT_PORT : Short.parseShort(s.trim());
-
-            s = get(properties, "outputPort");
-            outputPort = isNullOrEmpty(s) ? DEFAULT_OUTPUT_PORT : Short.parseShort(s.trim());
-
-        } catch (Exception e) {
-            mcastVlan = DEFAULT_MCAST_VLAN;
-            vlanEnabled = false;
-            priority = DEFAULT_PRIORITY;
-            inputPort = DEFAULT_INPUT_PORT;
-            outputPort = DEFAULT_OUTPUT_PORT;
-        }
-    }
-
-    private class InternalMulticastListener implements McastListener {
-        @Override
-        public void event(McastEvent event) {
-            McastRouteInfo info = event.subject();
-            switch (event.type()) {
-                case ROUTE_ADDED:
-                    break;
-                case ROUTE_REMOVED:
-                    break;
-                case SOURCE_ADDED:
-                    break;
-                case SINK_ADDED:
-                    if (!info.sink().isPresent()) {
-                        log.warn("No sink given after sink added event: {}", info);
-                        return;
-                    }
-                    provisionGroup(info.route(), info.sink().get());
-                    break;
-                case SINK_REMOVED:
-                    unprovisionGroup(event.subject());
-                    break;
-                default:
-                    log.warn("Unknown mcast event {}", event.type());
-            }
-        }
-    }
-
-    private void provisionFilterIgmp(DeviceId devId, boolean remove) {
-        Boolean deviceStatus = deviceAvailability.get(devId);
-        if (deviceStatus != null) {
-            if (!remove) {
-                return;
-            }
-        } else if (remove) {
-            return;
-        }
-        TrafficSelector.Builder igmp = DefaultTrafficSelector.builder()
-            .matchEthType(Ethernet.TYPE_IPV4)
-            .matchIPProtocol(IPv4.PROTOCOL_IGMP);
-        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder()
-            .setOutput(PortNumber.CONTROLLER);
-        FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
-        flowEntry.forDevice(devId);
-        flowEntry.withPriority(priority);
-        flowEntry.withSelector(igmp.build());
-        flowEntry.withTreatment(treatment.build());
-        flowEntry.fromApp(appId);
-        flowEntry.makePermanent();
-        if (!remove) {
-            deviceAvailability.put(devId, true);
-            flowRuleService.applyFlowRules(flowEntry.build());
-        } else {
-            deviceAvailability.remove(devId);
-            flowRuleService.removeFlowRules(flowEntry.build());
-        }
-        log.warn("IGMP flow rule " + (remove ? "removed" : "added") + " for device id " + devId);
-    }
-
-    private class InternalDeviceListener implements DeviceListener {
-        @Override
-        public void event(DeviceEvent event) {
-            DeviceId devId = event.subject().id();
-            switch (event.type()) {
-
-                case DEVICE_ADDED:
-                case DEVICE_UPDATED:
-                    provisionFilterIgmp(devId, false);
-                    break;
-                case DEVICE_AVAILABILITY_CHANGED:
-                    if (deviceService.isAvailable(devId)) {
-                        provisionFilterIgmp(devId, false);
-                    } else {
-                        provisionFilterIgmp(devId, true);
-                    }
-                    break;
-                case DEVICE_REMOVED:
-                case DEVICE_SUSPENDED:
-                    provisionFilterIgmp(devId, true);
-                    break;
-                case PORT_STATS_UPDATED:
-                case PORT_ADDED:
-                case PORT_UPDATED:
-                case PORT_REMOVED:
-                    //log.debug("Got event " + event.type() + " for device " + devId);
-                    break;
-                default:
-                    log.warn("Unknown device event {}", event.type());
-                    break;
-            }
-        }
-    }
-
-    private void unprovisionGroup(McastRouteInfo info) {
-        if (!info.sink().isPresent()) {
-            log.warn("No sink given after sink removed event: {}", info);
-            return;
-        }
-        ConnectPoint loc = info.sink().get();
-        AccessDeviceData oltInfo = oltData.get(loc.deviceId());
-        if (oltInfo != null) {
-            log.warn("Ignoring deprovisioning mcast route for OLT device: " + loc.deviceId());
-            return;
-        }
-        final IgmpPortPair portPair = cordIgmpTranslateTable.get(info.route().group());
-        if (portPair == null) {
-            log.warn("Ignoring unprovisioning for group " + info.route().group() + " with no port map");
-            return;
-        }
-        if (cordIgmpCountTable.remove(info.route().group(), 1) <= 1) {
-            //Remove flow for last channel leave
-            final PortNumber inPort = PortNumber.portNumber(portPair.inputPort());
-            final PortNumber outPort = PortNumber.portNumber(portPair.outputPort());
-            TrafficSelector.Builder mcast = DefaultTrafficSelector.builder()
-                .matchInPort(inPort)
-                .matchEthType(Ethernet.TYPE_IPV4)
-                .matchIPDst(info.route().group().toIpPrefix());
-            TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
-            FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
-            treatment.add(Instructions.createOutput(outPort));
-            flowEntry.forDevice(loc.deviceId());
-            flowEntry.withPriority(priority);
-            flowEntry.withSelector(mcast.build());
-            flowEntry.withTreatment(treatment.build());
-            flowEntry.fromApp(appId);
-            flowEntry.makePermanent();
-            flowRuleService.removeFlowRules(flowEntry.build());
-            log.warn("Flow rule removed for for device id " + loc.deviceId());
-        }
-    }
-
-    private void provisionGroup(McastRoute route, ConnectPoint sink) {
-        checkNotNull(route, "Route cannot be null");
-        checkNotNull(sink, "Sink cannot be null");
-
-        AccessDeviceData oltInfo = oltData.get(sink.deviceId());
-        if (oltInfo != null) {
-            log.warn("Ignoring provisioning mcast route for OLT device: " + sink.deviceId());
-            return;
-        }
-        final IgmpPortPair portPair = cordIgmpTranslateTable.get(route.group());
-        if (portPair == null) {
-            log.warn("Ports for Group " + route.group() + " not found in cord igmp map. Skipping provisioning.");
-            return;
-        }
-        if (cordIgmpCountTable.count(route.group()) == 0) {
-            //First group entry. Provision the flows
-            final PortNumber inPort = PortNumber.portNumber(portPair.inputPort());
-            final PortNumber outPort = PortNumber.portNumber(portPair.outputPort());
-            TrafficSelector.Builder mcast = DefaultTrafficSelector.builder()
-                    .matchInPort(inPort)
-                    .matchEthType(Ethernet.TYPE_IPV4)
-                    .matchIPDst(route.group().toIpPrefix());
-            TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
-            FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
-            treatment.add(Instructions.createOutput(outPort));
-            flowEntry.forDevice(sink.deviceId());
-            flowEntry.withPriority(priority);
-            flowEntry.withSelector(mcast.build());
-            flowEntry.withTreatment(treatment.build());
-            flowEntry.fromApp(appId);
-            flowEntry.makePermanent();
-            flowRuleService.applyFlowRules(flowEntry.build());
-            log.warn("Flow rules applied for device id " + sink.deviceId());
-        }
-        cordIgmpCountTable.add(route.group());
-    }
-
-    private class InternalNetworkConfigListener implements NetworkConfigListener {
-        @Override
-        public void event(NetworkConfigEvent event) {
-            switch (event.type()) {
-
-                case CONFIG_ADDED:
-                case CONFIG_UPDATED:
-                    if (event.configClass().equals(CORD_IGMP_TRANSLATE_CONFIG_CLASS)) {
-                        CordIgmpTranslateConfig config =
-                                networkConfig.getConfig((ApplicationId) event.subject(),
-                                        CORD_IGMP_TRANSLATE_CONFIG_CLASS);
-                        if (config != null) {
-                            cordIgmpTranslateTable.clear();
-                            cordIgmpCountTable.clear();
-                            config.getCordIgmpTranslations().forEach(
-                                mcastPorts -> cordIgmpTranslateTable.put(mcastPorts.group(), mcastPorts.portPair()));
-                        }
-                    }
-                    break;
-                case CONFIG_REGISTERED:
-                case CONFIG_UNREGISTERED:
-                case CONFIG_REMOVED:
-                    break;
-                default:
-                    break;
-            }
-        }
-
-        //@Override
-        //public boolean isRelevant(NetworkConfigEvent event) {
-        //    return event.configClass().equals(CORD_IGMP_TRANSLATE_CONFIG_CLASS);
-        //}
-
-
-    }
-
-}
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java
deleted file mode 100644
index 3d00259..0000000
--- a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.ciena.cordigmp;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.onlab.packet.IpAddress;
-import org.onosproject.core.ApplicationId;
-import org.onosproject.net.config.Config;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * IGMP SSM translate configuration.
- */
-public class CordIgmpTranslateConfig extends Config<ApplicationId> {
-
-    private static final String GROUP = "group";
-    private static final String INPUT_PORT = "inputPort";
-    private static final String OUTPUT_PORT = "outputPort";
-
-    @Override
-    public boolean isValid() {
-        for (JsonNode node : array) {
-            if (!hasOnlyFields((ObjectNode) node, GROUP, INPUT_PORT, OUTPUT_PORT)) {
-                return false;
-            }
-
-            if (!(isIpAddress((ObjectNode) node, GROUP, FieldPresence.MANDATORY) &&
-                  node.get(INPUT_PORT).isInt() && node.get(OUTPUT_PORT).isInt())) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Gets the list of CordIgmp translations.
-     *
-     * @return CordIgmp translations
-     */
-    public List<McastPorts> getCordIgmpTranslations() {
-        List<McastPorts> translations = new ArrayList();
-        for (JsonNode node : array) {
-            translations.add(
-                    new McastPorts(
-                            IpAddress.valueOf(node.path(GROUP).asText().trim()),
-                            Integer.valueOf(node.path(INPUT_PORT).asText().trim()),
-                            Integer.valueOf(node.path(OUTPUT_PORT).asText().trim())));
-        }
-        return translations;
-    }
-}
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java
deleted file mode 100644
index a3a6706..0000000
--- a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2015-2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.cordigmp;
-
-public class IgmpPortPair {
-    private final Integer inputPort;
-    private final Integer outputPort;
-
-    public IgmpPortPair(Integer inputPort, Integer outputPort) {
-        this.inputPort = inputPort;
-        this.outputPort = outputPort;
-    }
-
-    public Integer inputPort() {
-        return inputPort;
-    }
-
-    public Integer outputPort() {
-        return outputPort;
-    }
-}
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java
deleted file mode 100644
index 0ec4db3..0000000
--- a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2015 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.cordigmp;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Objects;
-import org.onlab.packet.IpAddress;
-
-import static com.google.common.base.MoreObjects.toStringHelper;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-/*
- * An entity representing a multicast group and its input and output ports.
- */
-@Beta
-public class McastPorts {
-
-    private final IpAddress group;
-    private final IgmpPortPair portPair;
-
-    public McastPorts(IpAddress group, Integer inputPort, Integer outputPort) {
-        checkNotNull(group, "Multicast route must specify a group address");
-        checkNotNull(inputPort, "Must indicate input port");
-        checkNotNull(outputPort, "Must indicate output port");
-        this.group = group;
-        this.portPair = new IgmpPortPair(inputPort, outputPort);
-    }
-
-    /**
-     * Fetches the group address of this route.
-     *
-     * @return an ip address
-     */
-    public IpAddress group() {
-        return group;
-    }
-
-    public Integer inputPort() {
-        return portPair.inputPort();
-    }
-
-    public Integer outputPort() {
-        return portPair.outputPort();
-    }
-
-    public IgmpPortPair portPair() {
-        return portPair;
-    }
-
-    @Override
-    public String toString() {
-        return toStringHelper(this)
-                .add("group", group)
-                .add("inputPort", inputPort())
-                .add("outputPort", outputPort())
-                .toString();
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-        McastPorts that = (McastPorts) o;
-        return Objects.equal(group, that.group) &&
-               Objects.equal(inputPort(), that.inputPort()) &&
-               Objects.equal(outputPort(), that.outputPort());
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(group, inputPort(), outputPort());
-    }
-
-}
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/package-info.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/package-info.java
deleted file mode 100644
index 7214bf7..0000000
--- a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2015-2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Ciena application for cord tester to provision flows with ovs.
- * This is required as OVS onos driver does not support multi-table inserts.
- * This application takes a port pair configuration per group to provision flows.
- * To be used in simulation environments with subscriber tests.
- * On the target, cordmcast app should be used.
- */
-package org.ciena.cordigmp;
diff --git a/src/test/apps/cord-config-1.0-SNAPSHOT.oar b/src/test/apps/cord-config-1.0-SNAPSHOT.oar
deleted file mode 100644
index eb68e9f..0000000
--- a/src/test/apps/cord-config-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/cord-config-1.1-SNAPSHOT.oar b/src/test/apps/cord-config-1.1-SNAPSHOT.oar
deleted file mode 100644
index e7fc767..0000000
--- a/src/test/apps/cord-config-1.1-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/cord-config-1.2-SNAPSHOT.oar b/src/test/apps/cord-config-1.2-SNAPSHOT.oar
deleted file mode 100644
index c4993ba..0000000
--- a/src/test/apps/cord-config-1.2-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/cord-config-2.0-SNAPSHOT.oar b/src/test/apps/cord-config-2.0-SNAPSHOT.oar
deleted file mode 100644
index 76b5235..0000000
--- a/src/test/apps/cord-config-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/cord-config-3.0-SNAPSHOT.oar b/src/test/apps/cord-config-3.0-SNAPSHOT.oar
deleted file mode 100644
index 12cf795..0000000
--- a/src/test/apps/cord-config-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/dhcpl2relay-1.0.0.oar b/src/test/apps/dhcpl2relay-1.0.0.oar
deleted file mode 100644
index 4af0cd2..0000000
--- a/src/test/apps/dhcpl2relay-1.0.0.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-1.0-SNAPSHOT.oar b/src/test/apps/igmp-1.0-SNAPSHOT.oar
deleted file mode 100644
index a4893f7..0000000
--- a/src/test/apps/igmp-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-1.1-SNAPSHOT-onos-1.7.oar b/src/test/apps/igmp-1.1-SNAPSHOT-onos-1.7.oar
deleted file mode 100644
index d2b5df5..0000000
--- a/src/test/apps/igmp-1.1-SNAPSHOT-onos-1.7.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-1.1-SNAPSHOT.oar b/src/test/apps/igmp-1.1-SNAPSHOT.oar
deleted file mode 100644
index 76b29c2..0000000
--- a/src/test/apps/igmp-1.1-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-1.2-SNAPSHOT.oar b/src/test/apps/igmp-1.2-SNAPSHOT.oar
deleted file mode 100644
index f73fa0a..0000000
--- a/src/test/apps/igmp-1.2-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-2.0-SNAPSHOT.oar b/src/test/apps/igmp-2.0-SNAPSHOT.oar
deleted file mode 100644
index 9db0750..0000000
--- a/src/test/apps/igmp-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/igmp-3.0-SNAPSHOT.oar b/src/test/apps/igmp-3.0-SNAPSHOT.oar
deleted file mode 100644
index 79c40e8..0000000
--- a/src/test/apps/igmp-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/mcast-1.3.0-SNAPSHOT.oar b/src/test/apps/mcast-1.3.0-SNAPSHOT.oar
deleted file mode 100644
index c51738a..0000000
--- a/src/test/apps/mcast-1.3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/olt-app-1.1-SNAPSHOT.oar b/src/test/apps/olt-app-1.1-SNAPSHOT.oar
deleted file mode 100644
index 09e73f1..0000000
--- a/src/test/apps/olt-app-1.1-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/olt-app-1.2-SNAPSHOT.oar b/src/test/apps/olt-app-1.2-SNAPSHOT.oar
deleted file mode 100644
index 3301b44..0000000
--- a/src/test/apps/olt-app-1.2-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/olt-app-1.3.0-SNAPSHOT.oar b/src/test/apps/olt-app-1.3.0-SNAPSHOT.oar
deleted file mode 100644
index 1b5606b..0000000
--- a/src/test/apps/olt-app-1.3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/olt-app-2.0-SNAPSHOT.oar b/src/test/apps/olt-app-2.0-SNAPSHOT.oar
deleted file mode 100644
index 8237f08..0000000
--- a/src/test/apps/olt-app-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/olt-app-3.0-SNAPSHOT.oar b/src/test/apps/olt-app-3.0-SNAPSHOT.oar
deleted file mode 100644
index ecb272e..0000000
--- a/src/test/apps/olt-app-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/onos-app-igmpproxy-1.1.0-SNAPSHOT.oar b/src/test/apps/onos-app-igmpproxy-1.1.0-SNAPSHOT.oar
deleted file mode 100644
index 1e9becf..0000000
--- a/src/test/apps/onos-app-igmpproxy-1.1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/sadis-app-1.0.0-SNAPSHOT.oar b/src/test/apps/sadis-app-1.0.0-SNAPSHOT.oar
deleted file mode 100644
index 3f59f5a..0000000
--- a/src/test/apps/sadis-app-1.0.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/sadis-app-1.2-SNAPSHOT.oar b/src/test/apps/sadis-app-1.2-SNAPSHOT.oar
deleted file mode 100644
index 3f59f5a..0000000
--- a/src/test/apps/sadis-app-1.2-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/sadis-app-3.0-SNAPSHOT.oar b/src/test/apps/sadis-app-3.0-SNAPSHOT.oar
deleted file mode 100644
index ec62abd..0000000
--- a/src/test/apps/sadis-app-3.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/vtn-1.0-SNAPSHOT.oar b/src/test/apps/vtn-1.0-SNAPSHOT.oar
deleted file mode 100644
index 4bc8852..0000000
--- a/src/test/apps/vtn-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/vtn-1.1-SNAPSHOT.oar b/src/test/apps/vtn-1.1-SNAPSHOT.oar
deleted file mode 100644
index eb68df2..0000000
--- a/src/test/apps/vtn-1.1-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/xconnect-1.0-SNAPSHOT.oar b/src/test/apps/xconnect-1.0-SNAPSHOT.oar
deleted file mode 100644
index 4089ed7..0000000
--- a/src/test/apps/xconnect-1.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/xconnect-2.0-SNAPSHOT.oar b/src/test/apps/xconnect-2.0-SNAPSHOT.oar
deleted file mode 100644
index 74794c5..0000000
--- a/src/test/apps/xconnect-2.0-SNAPSHOT.oar
+++ /dev/null
Binary files differ
diff --git a/src/test/apps/xconnect/pom.xml b/src/test/apps/xconnect/pom.xml
deleted file mode 100644
index c8d924e..0000000
--- a/src/test/apps/xconnect/pom.xml
+++ /dev/null
@@ -1,150 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Copyright 2017 Open Networking Foundation
-  ~
-  ~ Licensed under the Apache License, Version 2.0 (the "License");
-  ~ you may not use this file except in compliance with the License.
-  ~ You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>org.ciena.xconnect</groupId>
-    <artifactId>xconnect</artifactId>
-    <version>2.0-SNAPSHOT</version>
-    <packaging>bundle</packaging>
-
-    <description>ONOS OSGi bundle archetype</description>
-    <url>http://onosproject.org</url>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-        <onos.version>1.10.0-rc4</onos.version>
-
-        <onos.app.name>org.ciena.xconnect</onos.app.name>
-        <onos.app.title>ciena xconnect</onos.app.title>
-        <onos.app.origin>ciena</onos.app.origin>
-        <onos.app.category>default</onos.app.category>
-        <onos.app.url>http://onosproject.org</onos.app.url>
-        <onos.app.readme>ONOS OSGi bundle archetype.</onos.app.readme>
-
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.onosproject</groupId>
-            <artifactId>onos-api</artifactId>
-            <version>${onos.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.onosproject</groupId>
-            <artifactId>onlab-osgi</artifactId>
-            <version>${onos.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.onosproject</groupId>
-            <artifactId>onos-core-serializers</artifactId>
-            <version>${onos.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.12</version>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.onosproject</groupId>
-            <artifactId>onos-api</artifactId>
-            <version>${onos.version}</version>
-            <scope>test</scope>
-            <classifier>tests</classifier>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.felix</groupId>
-            <artifactId>org.apache.felix.scr.annotations</artifactId>
-            <version>1.9.12</version>
-            <scope>provided</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <version>3.0.1</version>
-                <extensions>true</extensions>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.5.1</version>
-                <configuration>
-                    <source>1.8</source>
-                    <target>1.8</target>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-scr-plugin</artifactId>
-                <version>1.21.0</version>
-                <executions>
-                    <execution>
-                        <id>generate-scr-srcdescriptor</id>
-                        <goals>
-                            <goal>scr</goal>
-                        </goals>
-                    </execution>
-                </executions>
-                <configuration>
-                    <supportedProjectTypes>
-                        <supportedProjectType>bundle</supportedProjectType>
-                        <supportedProjectType>war</supportedProjectType>
-                    </supportedProjectTypes>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.onosproject</groupId>
-                <artifactId>onos-maven-plugin</artifactId>
-                <version>1.10</version>
-                <executions>
-                    <execution>
-                        <id>cfg</id>
-                        <phase>generate-resources</phase>
-                        <goals>
-                            <goal>cfg</goal>
-                        </goals>
-                    </execution>
-                    <execution>
-                        <id>swagger</id>
-                        <phase>generate-sources</phase>
-                        <goals>
-                            <goal>swagger</goal>
-                        </goals>
-                    </execution>
-                    <execution>
-                        <id>app</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>app</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-
-</project>
diff --git a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/AppComponent.java b/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/AppComponent.java
deleted file mode 100644
index 9793ab1..0000000
--- a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/AppComponent.java
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright 2017-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.xconnect;
-import org.onosproject.net.config.NetworkConfigEvent;
-import org.onosproject.net.config.NetworkConfigListener;
-import org.onosproject.net.config.NetworkConfigRegistry;
-import org.onosproject.net.config.basics.SubjectFactories;
-import org.onosproject.mastership.MastershipService;
-import org.onosproject.core.CoreService;
-import org.onosproject.core.ApplicationId;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.google.common.collect.ImmutableSet;
-import org.apache.felix.scr.annotations.*;
-import org.onlab.packet.MacAddress;
-import org.onlab.packet.VlanId;
-import org.onosproject.net.DeviceId;
-import org.onosproject.net.PortNumber;
-import org.onosproject.net.flow.DefaultTrafficSelector;
-import org.onosproject.net.flow.DefaultTrafficTreatment;
-import org.onosproject.net.flow.TrafficSelector;
-import org.onosproject.net.flow.TrafficTreatment;
-import org.onosproject.net.flowobjective.*;
-import org.onosproject.net.flow.criteria.Criteria;
-import org.onlab.util.KryoNamespace;
-import org.onosproject.store.serializers.KryoNamespaces;
-import org.onosproject.store.service.ConsistentMap;
-import org.onosproject.store.service.Serializer;
-import org.onosproject.store.service.StorageService;
-import org.onosproject.net.config.ConfigFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * Skeletal ONOS application component.
- */
-@Component(immediate = true)
-public class AppComponent {
-
-    private final Logger log = LoggerFactory.getLogger(getClass());
-    private static final String NOT_MASTER = "Not master controller";
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    public FlowObjectiveService flowObjectiveService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected CoreService coreService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected MastershipService mastershipService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    protected StorageService storageService;
-
-    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
-    public NetworkConfigRegistry networkConfig;
-
-    private InternalNetworkConfigListener configListener =
-            new InternalNetworkConfigListener();
-
-    private ApplicationId appId;
-
-    private KryoNamespace.Builder xConnectKryo;
-
-    private ConsistentMap<XConnectStoreKey, NextObjective> xConnectNextObjStore;
-
-    private static final Class<XConnectTestConfig> XCONNECT_TEST_CONFIG_CLASS = XConnectTestConfig.class;
-
-    private ConfigFactory<ApplicationId, XConnectTestConfig> xconnectTestConfigFactory =
-            new ConfigFactory<ApplicationId, XConnectTestConfig>(
-                    SubjectFactories.APP_SUBJECT_FACTORY, XCONNECT_TEST_CONFIG_CLASS, "xconnectTestConfig") {
-                @Override
-                public XConnectTestConfig createConfig() {
-                    return new XConnectTestConfig();
-                }
-            };
-
-    @Activate
-    protected void activate() {
-        log.info("Started");
-        appId = coreService.registerApplication("org.ciena.xconnect");
-
-        xConnectKryo = new KryoNamespace.Builder()
-                .register(KryoNamespaces.API)
-                .register(XConnectStoreKey.class)
-                .register(NextObjContext.class);
-
-        xConnectNextObjStore = storageService
-                .<XConnectStoreKey, NextObjective>consistentMapBuilder()
-                .withName("cordtester-xconnect-nextobj-store")
-                .withSerializer(Serializer.using(xConnectKryo.build()))
-                .build();
-
-        networkConfig.addListener(configListener);
-        networkConfig.registerConfigFactory(xconnectTestConfigFactory);
-
-        XConnectTestConfig config = networkConfig.getConfig(appId, XConnectTestConfig.class);
-
-        if (config != null) {
-            config.getXconnects().forEach(key -> {
-                    populateXConnect(key, config.getPorts(key));
-                });
-        }
-    }
-
-    @Deactivate
-    protected void deactivate() {
-        log.info("Stopped");
-        networkConfig.removeListener(configListener);
-        XConnectTestConfig config = networkConfig.getConfig(appId, XConnectTestConfig.class);
-        //remove flows on app deactivate
-        if (config != null) {
-            config.getXconnects().forEach(key -> {
-                    revokeXConnect(key, config.getPorts(key));
-                });
-        }
-        networkConfig.unregisterConfigFactory(xconnectTestConfigFactory);
-    }
-
-    /**
-     * Populates XConnect groups and flows for given key.
-     *
-     * @param key XConnect key
-     * @param ports a set of ports to be cross-connected
-     */
-    private void populateXConnect(XConnectStoreKey key, Set<PortNumber> ports) {
-        if (!mastershipService.isLocalMaster(key.deviceId())) {
-            log.info("Abort populating XConnect {}: {}", key, NOT_MASTER);
-            return;
-        }
-        populateFilter(key, ports);
-        populateFwd(key, populateNext(key, ports));
-    }
-
-    private void populateFilter(XConnectStoreKey key, Set<PortNumber> ports) {
-        ports.forEach(port -> {
-            FilteringObjective.Builder filtObjBuilder = filterObjBuilder(key, port);
-            ObjectiveContext context = new DefaultObjectiveContext(
-                    (objective) -> log.debug("XConnect FilterObj for {} on port {} populated",
-                            key, port),
-                    (objective, error) ->
-                            log.warn("Failed to populate XConnect FilterObj for {} on port {}: {}",
-                                    key, port, error));
-            flowObjectiveService.filter(key.deviceId(), filtObjBuilder.add(context));
-        });
-    }
-
-    private FilteringObjective.Builder filterObjBuilder(XConnectStoreKey key, PortNumber port) {
-        FilteringObjective.Builder fob = DefaultFilteringObjective.builder();
-        fob.withKey(Criteria.matchInPort(port))
-                .addCondition(Criteria.matchVlanId(key.vlanId()))
-                .addCondition(Criteria.matchEthDst(MacAddress.NONE))
-                .withPriority(1234);
-        return fob.permit().fromApp(appId);
-    }
-
-    private NextObjective populateNext(XConnectStoreKey key, Set<PortNumber> ports) {
-        NextObjective nextObj = null;
-        if (xConnectNextObjStore.containsKey(key)) {
-            nextObj = xConnectNextObjStore.get(key).value();
-            log.debug("NextObj for {} found, id={}", key, nextObj.id());
-        } else {
-            NextObjective.Builder nextObjBuilder = nextObjBuilder(key, ports);
-            ObjectiveContext nextContext = new NextObjContext(Objective.Operation.ADD, key);
-            nextObj = nextObjBuilder.add(nextContext);
-            flowObjectiveService.next(key.deviceId(), nextObj);
-            xConnectNextObjStore.put(key, nextObj);
-            log.info("NextObj for {} not found. Creating new NextObj with id={}", key, nextObj.id());
-        }
-        return nextObj;
-    }
-
-    private NextObjective.Builder nextObjBuilder(XConnectStoreKey key, Set<PortNumber> ports) {
-        int nextId = flowObjectiveService.allocateNextId();
-        TrafficSelector metadata =
-                DefaultTrafficSelector.builder().matchVlanId(key.vlanId()).build();
-        NextObjective.Builder nextObjBuilder = DefaultNextObjective
-                .builder().withId(nextId)
-                .withType(NextObjective.Type.BROADCAST).fromApp(appId)
-                .withMeta(metadata);
-        ports.forEach(port -> {
-            TrafficTreatment.Builder tBuilder = DefaultTrafficTreatment.builder();
-            tBuilder.setOutput(port);
-            nextObjBuilder.addTreatment(tBuilder.build());
-        });
-        return nextObjBuilder;
-    }
-
-    private void populateFwd(XConnectStoreKey key, NextObjective nextObj) {
-        ForwardingObjective.Builder fwdObjBuilder = fwdObjBuilder(key, nextObj.id());
-        ObjectiveContext fwdContext = new DefaultObjectiveContext(
-                (objective) -> log.debug("XConnect FwdObj for {} populated", key),
-                (objective, error) ->
-                        log.warn("Failed to populate XConnect FwdObj for {}: {}", key, error));
-        flowObjectiveService.forward(key.deviceId(), fwdObjBuilder.add(fwdContext));
-    }
-
-    private ForwardingObjective.Builder fwdObjBuilder(XConnectStoreKey key, int nextId) {
-        /*
-         * Driver should treat objectives with MacAddress.NONE and !VlanId.NONE
-         * as the VLAN cross-connect broadcast rules
-         */
-        TrafficSelector.Builder sbuilder = DefaultTrafficSelector.builder();
-        sbuilder.matchVlanId(key.vlanId());
-        sbuilder.matchEthDst(MacAddress.NONE);
-
-        ForwardingObjective.Builder fob = DefaultForwardingObjective.builder();
-        fob.withFlag(ForwardingObjective.Flag.SPECIFIC)
-                .withSelector(sbuilder.build())
-                .nextStep(nextId)
-                .withPriority(32768)
-                .fromApp(appId)
-                .makePermanent();
-        return fob;
-    }
-
-    /**
-     * Processes Segment Routing App Config added event.
-     *
-     * @param event network config added event
-     */
-    protected void processXConnectConfigAdded(NetworkConfigEvent event) {
-        log.info("Processing XConnect CONFIG_ADDED");
-        XConnectTestConfig config = (XConnectTestConfig) event.config().get();
-        config.getXconnects().forEach(key -> {
-            populateXConnect(key, config.getPorts(key));
-        });
-    }
-
-    /**
-     * Processes Segment Routing App Config removed event.
-     *
-     * @param event network config removed event
-     */
-    protected void processXConnectConfigRemoved(NetworkConfigEvent event) {
-        log.info("Processing XConnect CONFIG_REMOVED");
-        XConnectTestConfig prevConfig = (XConnectTestConfig) event.prevConfig().get();
-        prevConfig.getXconnects().forEach(key -> {
-            revokeXConnect(key, prevConfig.getPorts(key));
-        });
-    }
-
-    /**
-     * Revokes filtering objectives for given XConnect.
-     *
-     * @param key XConnect store key
-     * @param ports XConnect ports
-     */
-    private void revokeFilter(XConnectStoreKey key, Set<PortNumber> ports) {
-        ports.forEach(port -> {
-            FilteringObjective.Builder filtObjBuilder = filterObjBuilder(key, port);
-            ObjectiveContext context = new DefaultObjectiveContext(
-                    (objective) -> log.debug("XConnect FilterObj for {} on port {} revoked",
-                            key, port),
-                    (objective, error) ->
-                            log.warn("Failed to revoke XConnect FilterObj for {} on port {}: {}",
-                                    key, port, error));
-            flowObjectiveService.filter(key.deviceId(), filtObjBuilder.remove(context));
-        });
-    }
-
-    /**
-     * Revokes next objectives for given XConnect.
-     *
-     * @param key XConnect store key
-     * @param nextObj next objective
-     * @param nextFuture completable future for this next objective operation
-     */
-    private void revokeNext(XConnectStoreKey key, NextObjective nextObj,
-            CompletableFuture<ObjectiveError> nextFuture) {
-        ObjectiveContext context = new ObjectiveContext() {
-            @Override
-            public void onSuccess(Objective objective) {
-                log.debug("Previous NextObj for {} removed", key);
-                if (nextFuture != null) {
-                    nextFuture.complete(null);
-                }
-            }
-
-            @Override
-            public void onError(Objective objective, ObjectiveError error) {
-                log.warn("Failed to remove previous NextObj for {}: {}", key, error);
-                if (nextFuture != null) {
-                    nextFuture.complete(error);
-                }
-            }
-        };
-        flowObjectiveService.next(key.deviceId(),
-                                  (NextObjective) nextObj.copy().remove(context));
-        xConnectNextObjStore.remove(key);
-    }
-
-    /**
-     * Revokes forwarding objectives for given XConnect.
-     *
-     * @param key XConnect store key
-     * @param nextObj next objective
-     * @param fwdFuture completable future for this forwarding objective operation
-     */
-    private void revokeFwd(XConnectStoreKey key, NextObjective nextObj,
-            CompletableFuture<ObjectiveError> fwdFuture) {
-        ForwardingObjective.Builder fwdObjBuilder = fwdObjBuilder(key, nextObj.id());
-        ObjectiveContext context = new ObjectiveContext() {
-            @Override
-            public void onSuccess(Objective objective) {
-                log.debug("Previous FwdObj for {} removed", key);
-                if (fwdFuture != null) {
-                    fwdFuture.complete(null);
-                }
-            }
-
-            @Override
-            public void onError(Objective objective, ObjectiveError error) {
-                log.warn("Failed to remove previous FwdObj for {}: {}", key, error);
-                if (fwdFuture != null) {
-                    fwdFuture.complete(error);
-                }
-            }
-        };
-        flowObjectiveService
-            .forward(key.deviceId(), fwdObjBuilder.remove(context));
-    }
-
-    private void revokeXConnect(XConnectStoreKey key, Set<PortNumber> ports) {
-        if (!mastershipService.isLocalMaster(key.deviceId())) {
-            log.info("Abort populating XConnect {}: {}", key, NOT_MASTER);
-            return;
-        }
-        revokeFilter(key, ports);
-        if (xConnectNextObjStore.containsKey(key)) {
-            NextObjective nextObj = xConnectNextObjStore.get(key).value();
-            revokeFwd(key, nextObj, null);
-            revokeNext(key, nextObj, null);
-        } else {
-            log.warn("NextObj for {} does not exist in the store.", key);
-        }
-    }
-
-    private final class NextObjContext implements ObjectiveContext {
-        Objective.Operation op;
-        XConnectStoreKey key;
-
-        private NextObjContext(Objective.Operation op, XConnectStoreKey key) {
-            this.op = op;
-            this.key = key;
-        }
-
-        @Override
-        public void onSuccess(Objective objective) {
-            log.debug("XConnect NextObj for {} {}ED", key, op);
-        }
-
-        @Override
-        public void onError(Objective objective, ObjectiveError error) {
-            log.warn("Failed to {} XConnect NextObj for {}: {}", op, key, error);
-        }
-    }
-
-    private class InternalNetworkConfigListener implements NetworkConfigListener {
-
-        @Override
-        public void event(NetworkConfigEvent event) {
-            if (event.configClass().equals(XCONNECT_TEST_CONFIG_CLASS)) {
-                switch (event.type()) {
-                    case CONFIG_ADDED:
-                        processXConnectConfigAdded(event);
-                        break;
-                    case CONFIG_UPDATED:
-                        log.info("CONFIG UPDATED event is unhandled");
-                        break;
-                    case CONFIG_REMOVED:
-                        processXConnectConfigRemoved(event);
-                        break;
-                    default:
-                        break;
-                }
-            }
-        }
-    }
-}
diff --git a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectStoreKey.java b/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectStoreKey.java
deleted file mode 100644
index 4bd0f03..0000000
--- a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectStoreKey.java
+++ /dev/null
@@ -1,85 +0,0 @@
-package org.ciena.xconnect;
-/*
- * Copyright 2016-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//package org.onosproject.segmentrouting.storekey;
-
-import org.onlab.packet.VlanId;
-import org.onosproject.net.DeviceId;
-
-import java.util.Objects;
-
-/**
- * Key of VLAN cross-connect next objective store.
- */
-public class XConnectStoreKey {
-    private final DeviceId deviceId;
-    private final VlanId vlanId;
-
-    /**
-     * Constructs the key of cross-connect next objective store.
-     *
-     * @param deviceId device ID of the VLAN cross-connection
-     * @param vlanId VLAN ID of the VLAN cross-connection
-     */
-    public XConnectStoreKey(DeviceId deviceId, VlanId vlanId) {
-        this.deviceId = deviceId;
-        this.vlanId = vlanId;
-    }
-
-    /**
-     * Returns the device ID of this key.
-     *
-     * @return device ID
-     */
-    public DeviceId deviceId() {
-        return this.deviceId;
-    }
-
-    /**
-     * Returns the VLAN ID of this key.
-     *
-     * @return VLAN ID
-     */
-    public VlanId vlanId() {
-        return this.vlanId;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (!(o instanceof XConnectStoreKey)) {
-            return false;
-        }
-        XConnectStoreKey that =
-                (XConnectStoreKey) o;
-        return (Objects.equals(this.deviceId, that.deviceId) &&
-                Objects.equals(this.vlanId, that.vlanId));
-    }
-
-    // The list of neighbor ids and label are used for comparison.
-    @Override
-    public int hashCode() {
-        return Objects.hash(deviceId, vlanId);
-    }
-
-    @Override
-    public String toString() {
-        return "Device: " + deviceId + " VlanId: " + vlanId;
-    }
-}
diff --git a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectTestConfig.java b/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectTestConfig.java
deleted file mode 100644
index 61c7f48..0000000
--- a/src/test/apps/xconnect/src/main/java/org/ciena/xconnect/XConnectTestConfig.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2016 Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.ciena.xconnect;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.google.common.collect.ImmutableSet;
-import org.onlab.packet.VlanId;
-import org.onosproject.core.ApplicationId;
-import org.onosproject.net.DeviceId;
-import org.onosproject.net.PortNumber;
-import org.onosproject.net.config.Config;
-
-import java.util.Set;
-
-import static com.google.common.base.Preconditions.checkArgument;
-
-/**
- * Configuration object for cross-connect.
- */
-public class XConnectTestConfig extends Config<ApplicationId> {
-
-    private static final String VLAN = "vlan";
-    private static final String PORTS = "ports";
-    private static final String UNEXPECTED_FIELD_NAME = "Unexpected field name";
-
-    @Override
-    public boolean isValid() {
-        try {
-            getXconnects().forEach(this::getPorts);
-        } catch (IllegalArgumentException e) {
-            return false;
-        }
-        return true;
-    }
-
-    /**
-     * Returns all xconnect keys.
-     *
-     * @return all keys (device/vlan pairs)
-     * @throws IllegalArgumentException if wrong format
-     */
-    public Set<XConnectStoreKey> getXconnects() {
-        ImmutableSet.Builder<XConnectStoreKey> builder = ImmutableSet.builder();
-        object.fields().forEachRemaining(entry -> {
-            DeviceId deviceId = DeviceId.deviceId(entry.getKey());
-            builder.addAll(getXconnects(deviceId));
-        });
-        return builder.build();
-    }
-
-    /**
-     * Returns xconnect keys of given device.
-     *
-     * @param deviceId ID of the device from which we want to get XConnect info
-     * @return xconnect keys (device/vlan pairs) of given device
-     * @throws IllegalArgumentException if wrong format
-     */
-    public Set<XConnectStoreKey> getXconnects(DeviceId deviceId) {
-        ImmutableSet.Builder<XConnectStoreKey> builder = ImmutableSet.builder();
-        JsonNode vlanPortPair = object.get(deviceId.toString());
-        if (vlanPortPair != null) {
-            vlanPortPair.forEach(jsonNode -> {
-                if (!hasOnlyFields((ObjectNode) jsonNode, VLAN, PORTS)) {
-                    throw new IllegalArgumentException(UNEXPECTED_FIELD_NAME);
-                }
-                VlanId vlanId = VlanId.vlanId((short) jsonNode.get(VLAN).asInt());
-                builder.add(new XConnectStoreKey(deviceId, vlanId));
-            });
-        }
-        return builder.build();
-    }
-
-    /**
-     * Returns ports of given xconnect key.
-     *
-     * @param xconnect xconnect key
-     * @return set of two ports associated with given xconnect key
-     * @throws IllegalArgumentException if wrong format
-     */
-    public Set<PortNumber> getPorts(XConnectStoreKey xconnect) {
-        ImmutableSet.Builder<PortNumber> builder = ImmutableSet.builder();
-        object.get(xconnect.deviceId().toString()).forEach(vlanPortsPair -> {
-            if (xconnect.vlanId().toShort() == vlanPortsPair.get(VLAN).asInt()) {
-                int portCount = vlanPortsPair.get(PORTS).size();
-                checkArgument(portCount == 2,
-                        "Expect 2 ports but found " + portCount + " on " + xconnect);
-                vlanPortsPair.get(PORTS).forEach(portNode -> {
-                    builder.add(PortNumber.portNumber(portNode.asInt()));
-                });
-            }
-        });
-        return builder.build();
-    }
-}
diff --git a/src/test/apps/xconnect/src/test/java/org/ciena/xconnect/AppComponentTest.java b/src/test/apps/xconnect/src/test/java/org/ciena/xconnect/AppComponentTest.java
deleted file mode 100644
index 7dabbc4..0000000
--- a/src/test/apps/xconnect/src/test/java/org/ciena/xconnect/AppComponentTest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2017-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ciena.xconnect;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Set of tests of the ONOS application component.
- */
-public class AppComponentTest {
-
-    private AppComponent component;
-
-    @Before
-    public void setUp() {
-        component = new AppComponent();
-        component.activate();
-
-    }
-
-    @After
-    public void tearDown() {
-        component.deactivate();
-    }
-
-    @Test
-    public void basics() {
-
-    }
-
-}
diff --git a/src/test/apps/xconnect/xconnect.iml b/src/test/apps/xconnect/xconnect.iml
deleted file mode 100644
index 0ecc1bf..0000000
--- a/src/test/apps/xconnect/xconnect.iml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
-  <component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_8" inherit-compiler-output="false">
-    <output url="file://$MODULE_DIR$/target/classes" />
-    <output-test url="file://$MODULE_DIR$/target/test-classes" />
-    <content url="file://$MODULE_DIR$">
-      <sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
-      <excludeFolder url="file://$MODULE_DIR$/target" />
-    </content>
-    <orderEntry type="inheritedJdk" />
-    <orderEntry type="sourceFolder" forTests="false" />
-    <orderEntry type="library" name="Maven: org.onosproject:onos-api:1.8.0-SNAPSHOT" level="project" />
-    <orderEntry type="library" name="Maven: joda-time:joda-time:2.9.3" level="project" />
-    <orderEntry type="library" name="Maven: commons-configuration:commons-configuration:1.10" level="project" />
-    <orderEntry type="library" name="Maven: commons-lang:commons-lang:2.6" level="project" />
-    <orderEntry type="library" name="Maven: commons-logging:commons-logging:1.1.1" level="project" />
-    <orderEntry type="library" name="Maven: commons-collections:commons-collections:3.2.2" level="project" />
-    <orderEntry type="library" name="Maven: org.onosproject:onlab-rest:1.8.0-SNAPSHOT" level="project" />
-    <orderEntry type="library" name="Maven: javax.ws.rs:javax.ws.rs-api:2.0.1" level="project" />
-    <orderEntry type="library" name="Maven: com.google.guava:guava:19.0" level="project" />
-    <orderEntry type="library" name="Maven: org.onosproject:onlab-misc:1.8.0-SNAPSHOT" level="project" />
-    <orderEntry type="library" name="Maven: io.netty:netty:3.10.5.Final" level="project" />
-    <orderEntry type="library" name="Maven: org.apache.commons:commons-lang3:3.4" level="project" />
-    <orderEntry type="library" name="Maven: com.eclipsesource.minimal-json:minimal-json:0.9.4" level="project" />
-    <orderEntry type="library" name="Maven: com.esotericsoftware:kryo:4.0.0" level="project" />
-    <orderEntry type="library" name="Maven: com.esotericsoftware:reflectasm:1.11.3" level="project" />
-    <orderEntry type="library" name="Maven: org.ow2.asm:asm:5.0.4" level="project" />
-    <orderEntry type="library" name="Maven: com.esotericsoftware:minlog:1.3.0" level="project" />
-    <orderEntry type="library" name="Maven: org.objenesis:objenesis:2.2" level="project" />
-    <orderEntry type="library" name="Maven: io.dropwizard.metrics:metrics-core:3.1.2" level="project" />
-    <orderEntry type="library" name="Maven: org.slf4j:slf4j-api:1.7.7" level="project" />
-    <orderEntry type="library" name="Maven: io.dropwizard.metrics:metrics-json:3.1.2" level="project" />
-    <orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-databind:2.4.2" level="project" />
-    <orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-annotations:2.4.0" level="project" />
-    <orderEntry type="library" name="Maven: com.fasterxml.jackson.core:jackson-core:2.4.2" level="project" />
-    <orderEntry type="library" name="Maven: com.google.code.findbugs:jsr305:3.0.1" level="project" />
-    <orderEntry type="library" name="Maven: org.onosproject:onlab-osgi:1.8.0-SNAPSHOT" level="project" />
-    <orderEntry type="library" scope="TEST" name="Maven: junit:junit:4.12" level="project" />
-    <orderEntry type="library" scope="TEST" name="Maven: org.hamcrest:hamcrest-core:1.3" level="project" />
-    <orderEntry type="library" scope="TEST" name="Maven: org.onosproject:onos-api:tests:1.8.0-SNAPSHOT" level="project" />
-    <orderEntry type="library" scope="PROVIDED" name="Maven: org.apache.felix:org.apache.felix.scr.annotations:1.9.12" level="project" />
-  </component>
-</module>
\ No newline at end of file
diff --git a/src/test/apps/xconnect/xconnect.json b/src/test/apps/xconnect/xconnect.json
deleted file mode 100644
index b0321e6..0000000
--- a/src/test/apps/xconnect/xconnect.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "apps":
-    {
-        "org.ciena.xconnect":
-        {
-            "xconnectTestConfig":
-            {
-                "of:00007a916740ea43":
-                [
-                    {
-                        "vlan": 555,
-                        "ports": [1, 2]
-                    }
-                ]
-            }
-        }
-    }
-}
diff --git a/src/test/builder/Makefile b/src/test/builder/Makefile
deleted file mode 100644
index dcaf783..0000000
--- a/src/test/builder/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-## Test State Machine builder
-
-all: build_fsm
-
-build_fsm:
-	@sh buildFsm.sh ../fsm
-
-clean:
-	rm -f *~ *.pyc ../fsm/*
diff --git a/src/test/builder/buildFsm.sh b/src/test/builder/buildFsm.sh
deleted file mode 100644
index ace2f5e..0000000
--- a/src/test/builder/buildFsm.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-odir="$1"
-if [ -z "$odir" ]; then
-    odir = "./"
-fi
-
-##Generate TLS authentication Test state machine
-python yamlFsm.py -p TlsAuthHolder -f noseTlsAuthTest.yaml > ${odir}/noseTlsAuthHolder.py
-
-##Generate PAP authentication state machine
-python yamlFsm.py -p PAPAuthHolder -f nosePAPTest.yaml > ${odir}/nosePAPAuthHolder.py
-
-
-##Generate DNS test state machine
-#python yamlFsm.py -p DnsHolder -f noseDnsTest.yaml > ${odir}/noseDnsHolder.py
-
-#Generate EAP MD5 authentication state machine
-python yamlFsm.py -p Md5AuthHolder -f noseMD5AuthTest.yaml > ${odir}/noseMd5AuthHolder.py
-
-
diff --git a/src/test/builder/noseDnsTest.yaml b/src/test/builder/noseDnsTest.yaml
deleted file mode 100644
index 3bfcff6..0000000
--- a/src/test/builder/noseDnsTest.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-States:
-    ST_DNS_SND_REC:
-        Events:
-            EVT_DNS_SND_REC:
-                Actions:
-                    - _dns_snd_rec 
-                NextState: ST_DNS_FINAL
-        
-
diff --git a/src/test/builder/noseMD5AuthTest.yaml b/src/test/builder/noseMD5AuthTest.yaml
deleted file mode 100644
index 80e050d..0000000
--- a/src/test/builder/noseMD5AuthTest.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-States:
-    ST_EAP_SETUP:
-        Events:
-            EVT_EAP_SETUP:
-                Actions:
-                    - _eapSetup
-                NextState: ST_EAP_START
-    ST_EAP_START:
-        Events:
-            EVT_EAP_START:
-                Actions:
-                    - _eapStart
-                NextState: ST_EAP_ID_REQ
-    ST_EAP_ID_REQ:
-        Events:
-            EVT_EAP_ID_REQ:
-                Actions:
-                    - _eapIdReq
-                NextState: ST_EAP_MD5_CHALLENGE
-    ST_EAP_MD5_CHALLENGE:
-        Events:
-            EVT_EAP_MD5_CHALLENGE:
-                Actions:
-                    - _eapMd5Challenge 
-                NextState: ST_EAP_STATUS
-    ST_EAP_STATUS:
-        Events:
-            EVT_EAP_STATUS:
-                Actions:
-                    - _eapStatus
-                NextState: ST_EAP_MD5_DONE
-
diff --git a/src/test/builder/nosePAPTest.yaml b/src/test/builder/nosePAPTest.yaml
deleted file mode 100644
index 1a917be..0000000
--- a/src/test/builder/nosePAPTest.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-States:
-    ST_EAP_SETUP:
-        Events:
-            EVT_EAP_SETUP:
-                Actions:
-                    - _eapSetup
-                NextState: ST_EAP_START
-    ST_EAP_START:
-        Events:
-            EVT_EAP_START:
-                Actions:
-                    - _eapStart
-                NextState: ST_EAP_ID_REQ
-    ST_EAP_ID_REQ:
-        Events:
-            EVT_EAP_ID_REQ:
-                Actions:
-                    - _eapIdReq
-                NextState: ST_EAP_PAP_USER_REQ
-    ST_EAP_PAP_USER_REQ:
-        Events:
-            EVT_EAP_PAP_USER_REQ:
-                Actions:
-                    - _eapPAPUserReq
-                NextState: ST_EAP_PAP_PASSWD_REQ
-    ST_EAP_PAP_PASSWD_REQ:
-        Events:
-            EVT_EAP_PAP_PASSWD_REQ:
-                Actions:
-                    - _eapPAPPassReq
-                NextState: ST_EAP_PAP_DONE
-        
diff --git a/src/test/builder/noseTlsAuthTest.yaml b/src/test/builder/noseTlsAuthTest.yaml
deleted file mode 100644
index 464e157..0000000
--- a/src/test/builder/noseTlsAuthTest.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-States:
-    ST_EAP_SETUP:
-        Events:
-            EVT_EAP_SETUP:
-                Actions:
-                    - _eapSetup
-                NextState: ST_EAP_START
-    ST_EAP_START:
-        Events:
-            EVT_EAP_START:
-                Actions:
-                    - _eapStart
-                NextState: ST_EAP_ID_REQ
-    ST_EAP_ID_REQ:
-        Events:
-            EVT_EAP_ID_REQ:
-                Actions:
-                    - _eapIdReq
-                NextState: ST_EAP_TLS_HELLO_REQ
-    ST_EAP_TLS_HELLO_REQ:
-        Events:
-            EVT_EAP_TLS_HELLO_REQ:
-                Actions:
-                    - _eapTlsHelloReq
-                NextState: ST_EAP_TLS_CERT_REQ
-    ST_EAP_TLS_CERT_REQ:
-        Events:
-            EVT_EAP_TLS_CERT_REQ:
-                Actions:
-                    - _eapTlsCertReq
-                NextState: ST_EAP_TLS_CHANGE_CIPHER_SPEC
-    ST_EAP_TLS_CHANGE_CIPHER_SPEC:
-        Events:
-            EVT_EAP_TLS_CHANGE_CIPHER_SPEC:
-                Actions:
-                    - _eapTlsChangeCipherSpec
-                NextState: ST_EAP_TLS_FINISHED
-    ST_EAP_TLS_FINISHED:
-        Events:
-            EVT_EAP_TLS_FINISHED:
-                Actions:
-                    - _eapTlsFinished
-                NextState: ST_EAP_TLS_DONE
-        
\ No newline at end of file
diff --git a/src/test/builder/yamlFsm.py b/src/test/builder/yamlFsm.py
deleted file mode 100644
index b17c6a9..0000000
--- a/src/test/builder/yamlFsm.py
+++ /dev/null
@@ -1,237 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import yaml, pprint, sys, pdb
-
-stateHash = {}
-header = '''#!/usr/bin/env python
-'''
-# ---------------------------- DOT -----------------------------------
-colorList = ['aquamarine4', 'crimson', 'chartreuse4', 'darkolivegreen', 'darkgoldenrod', 'dodgerblue3', 'blue4', 'cyan4']
-rankdict = {}
-# ---------------------------- DOT -----------------------------------
-
-if __name__ == '__main__':
-
-    usage = ''
-    from optparse import OptionParser
-    parser = OptionParser(usage)
-    parser.add_option('-p', '--prefix', dest='prefix', type='string', action='store', help='prefix for state table')
-    parser.add_option('-f', '--file', dest='file', type='string', action='store', help='input yaml filename')
-    parser.add_option('-d', '--dot', dest='dot', default=False, action='store_true', help='output DOT')
-    (opts, args) = parser.parse_args()
-    prefix = opts.prefix
-    f = open(opts.file, 'r')
-    y = yaml.load(f)
-    f.close()
-    stateHash = y['States']
-    eventHash = {}
-    # GLOBAL DOT DIRECTIVES
-    stateRadiate        = y.get('DOT_StateRadiate')
-    ignoredIntensity    = abs(int(y.get('DOT_IgnoredIntensity', 100)) - 100)
-    eventGroups         = y.get('DOT_EventGroups')
-    if stateRadiate is not None:
-        stateRadiate = str(stateRadiate)
-
-    actionStrLen = [0]
-    stateColorIdx = 0
-    for k, v in stateHash.iteritems():
-        events = v.get('Events')
-        if events:
-            for event in events.keys():
-                eventHash[event] = {}
-            actionStr = ''
-            for ev in events.values():
-                if ev.get('Actions'):
-                    actionStr = ','.join(['obj.%s' % action for action in ev['Actions']]) + ','
-                    actionStrLen.append(len(actionStr))
-            
-        ievents = v.get('IgnoredEvents')
-        if ievents:
-            for event in ievents.keys():
-                eventHash[event] = {}
-
-        # ---------------------------- DOT -----------------------------------
-        # rankdict setup
-        rank = v.get('DOT_Rank')
-        if rank:
-            print >>sys.stderr, '%s rank %s' % (k, str(rank)) 
-            rankdict.setdefault(rank, []).append(k)
-
-        # assign a possible color if not specified
-        color = v.get('DOT_Color')
-        if color:
-            print >>sys.stderr, 'using user assigned color %s for %s' % (color, k)
-        else:            
-            if stateRadiate and stateRadiate.lower() == 'auto':
-                color = colorList[stateColorIdx % len(colorList)]
-                stateColorIdx+= 1
-            else:
-                color = 'black'
-                
-        stateHash[k]['DOT_Color'] = color
-        # ---------------------------- DOT -----------------------------------        
-
-    # ---------------------------- DOT -----------------------------------
-    # update the event hash with information from the event groups (if present)
-    if eventGroups:
-        for group in eventGroups.values():
-            for event in group['Events'].keys():
-                for attr, val in group['Attrs'].iteritems():
-                    eventHash[event][attr] = val
-                    print >>sys.stderr, 'assigning event group attr event %s attr %s val %s' % (event, attr, val)
-    # ---------------------------- DOT -----------------------------------
-
-    maxStateLen = reduce(max, [len(x) for x in stateHash.keys()]) + 5 + len(prefix) 
-    maxEventLen = reduce(max, [len(x) for x in eventHash.keys()]) + 5 + len(prefix)
-    maxActionLen = reduce(max, actionStrLen) + 5
-
-    if opts.dot:
-        print 'digraph G {'
-        print ' edge  [fontname="Tahoma", fontsize="10", minlen=2];'
-        print ' node  [fontname="Tahoma", fontsize="10"];'
-        print ' graph [fontname="Tahoma", label="%s"];' % prefix
-        print >>sys.stderr, 'stateRadiate:%s\nignoredIntensity:%d' % (stateRadiate, ignoredIntensity)
-        
-        # emit state declarations
-        for state in stateHash.keys():
-            print ' %s[color="%s"];' % (state, stateHash[state]['DOT_Color'])
-
-        # emit rankings        
-        for k, v in rankdict.iteritems():
-            print >>sys.stderr, '%s rank %s' % (k, str(v)) 
-
-            print 'subgraph { rank = same;'
-            for state in v:
-                    print ' %s;' % state 
-            print '}'
-            
-        for state, va in stateHash.iteritems():
-            # emit ignored events
-            if va.get('IgnoredEvents'):
-                for event, v in va['IgnoredEvents'].iteritems():
-                    stateStr = state
-                    eventStr = event
-                    print '%s -> %s [label="%s/",minlen=1, fontcolor="grey%d", color="grey%d"];' % (stateStr, stateStr, eventStr, ignoredIntensity, ignoredIntensity)
-
-            # emit transitions
-            if va.get('Events'):
-                for event, v in va['Events'].iteritems():
-                    stateStr = state
-                    eventStr = event
-                    actionStr = ''
-                    if v.get('Actions'):
-                        actionStr = '\\n'.join([a.strip('_') for a in v['Actions']])
-                    nextStr = v['NextState']
-                    labelStr = '%s/\\n%s' % (eventStr, actionStr)
-                    if stateRadiate:
-                        color = va['DOT_Color']
-                    elif len(eventHash[event]):
-                        color = eventHash[event]['Color']
-                    else:
-                        color = 'black'
-
-                    fontColor = color
-                    styleStr = ''
-                    style = eventHash[event].get('Style')
-                    if style:
-                        styleStr = ',style="%s"' % (style)
-
-                        if style == 'invis':
-                            fontColor = 'white'
-                        
-                    print '%s -> %s [label="%s", color="%s", fontcolor="%s" %s];' % (stateStr, nextStr, labelStr, color, fontColor, styleStr)
-                
-            print
-
-        print '}'
-
-    else:
-    
-### emit it
-
-        print header
-
-### enumerations
-        '''
-        print '%sSt = Enumeration("%sState",(' % (prefix, prefix)
-        for state in stateHash.keys():
-            print '%s"%s",' % (' '*12, state)
-        print '%s))' % (' '*12)
-
-        print 
-        
-        print '%sEv = Enumeration("%sEvent",(' % (prefix, prefix)
-        for event in eventHash.keys():
-            print '%s"%s",' % (' '*12, event)
-        print '%s))' % (' '*12)
-        '''
-### table
-
-        fmt = '      (%' + '-%d.%ds' % (maxStateLen, maxStateLen) + '%' + '-%d.%ds' % (maxEventLen, maxEventLen) + '):( %' +' -%d.%ds' % (maxActionLen, maxActionLen) + '%s),' 
-        cfmt= '    ## %' + '-%d.%ds' % (maxStateLen, maxStateLen) + '%' + '-%d.%ds' % (maxEventLen, maxEventLen) + '    %' +' -%d.%ds' % (maxActionLen, maxActionLen) + '%s' 
-
-        print 'def init%s%sFsmTable(obj,St,Ev):' % (prefix[0].upper(), prefix[1:])
-#        print "    %sFsmTable = {" % prefix
-        print "    return {"
-        print
-        
-        for state, va in stateHash.iteritems():
-
-            print cfmt % ('CurrentState', 'Event', 'Actions', 'NextState')
-            print
-
-            if va.get('IgnoredEvents'):
-                for event, v in va['IgnoredEvents'].iteritems():
-                    stateStr = '%sSt.' % ('') + state + ','
-                    eventStr = '%sEv.' % ('') + event
-                
-                    print fmt % (stateStr, eventStr, '(),', stateStr.strip(','))
-
-            if va.get('Events'):
-                for event, v in va['Events'].iteritems():
-                    stateStr = '%sSt.' % ('') + state + ','
-                    eventStr = '%sEv.' % ('') + event
-                    actionStr = ''
-                    if v.get('Actions'):
-                        actionStr = ','.join(['obj.%s' % action for action in v['Actions']]) + ','
-                                        
-                    nextStr = '%sSt.' % ('') + v['NextState']
-                    
-                    print fmt % (stateStr, eventStr, '(%s),' % actionStr , nextStr)
-
-            print
-        
-        print "}"    
-        print    
-
-
diff --git a/src/test/cbench/__init__.py b/src/test/cbench/__init__.py
deleted file mode 100644
index 88eb0c5..0000000
--- a/src/test/cbench/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/cbench/cbench b/src/test/cbench/cbench
deleted file mode 100755
index 2e726fc..0000000
--- a/src/test/cbench/cbench
+++ /dev/null
Binary files differ
diff --git a/src/test/cbench/cbench.patch b/src/test/cbench/cbench.patch
deleted file mode 100644
index 0ce31e1..0000000
--- a/src/test/cbench/cbench.patch
+++ /dev/null
@@ -1,179 +0,0 @@
-diff --git a/cbench/cbench.c b/cbench/cbench.c
-index 70fed93..cdf8492 100644
---- a/cbench/cbench.c
-+++ b/cbench/cbench.c
-@@ -45,6 +45,7 @@ struct myargs my_options[] = {
-     {"connect-group-size",  'I', "number of switches in a connection delay group", MYARGS_INTEGER, {.integer = 1}},
-     {"learn-dst-macs",  'L', "send gratuitious ARP replies to learn destination macs before testing", MYARGS_FLAG, {.flag = 1}},
-     {"dpid-offset",  'o', "switch DPID offset", MYARGS_INTEGER, {.integer = 1}},
-+    {"igmp-test", 'g', "IGMP join leave test", MYARGS_FLAG, {.flag = 0}},
-     {0, 0, 0, 0}
- };
- 
-@@ -257,6 +258,7 @@ int main(int argc, char * argv[])
-     int     learn_dst_macs = myargs_get_default_flag(my_options, "learn-dst-macs");
-     int     dpid_offset = myargs_get_default_integer(my_options, "dpid-offset");
-     int     mode = MODE_LATENCY;
-+    int     igmp_test = myargs_get_default_flag(my_options, "igmp-test");
-     int     i,j;
- 
-     const struct option * long_opts = myargs_to_long(my_options);
-@@ -326,6 +328,9 @@ int main(int argc, char * argv[])
-             case 'o':
-                 dpid_offset = atoi(optarg);
-                 break;
-+            case 'g':
-+                igmp_test = 1;
-+                break;
-             default: 
-                 myargs_usage(my_options, PROG_TITLE, "help message", NULL, 1);
-         }
-@@ -388,7 +393,8 @@ int main(int argc, char * argv[])
-         if(debug)
-             fprintf(stderr,"Initializing switch %d ... ", i+1);
-         fflush(stderr);
--        fakeswitch_init(&fakeswitches[i],dpid_offset+i,sock,BUFLEN, debug, delay, mode, total_mac_addresses, learn_dst_macs);
-+        fakeswitch_init(&fakeswitches[i],dpid_offset+i,sock,BUFLEN, debug, delay, mode, total_mac_addresses,
-+                        learn_dst_macs, igmp_test);
-         if(debug)
-             fprintf(stderr," :: done.\n");
-         fflush(stderr);
-diff --git a/cbench/fakeswitch.c b/cbench/fakeswitch.c
-index a424d14..d3f16de 100644
---- a/cbench/fakeswitch.c
-+++ b/cbench/fakeswitch.c
-@@ -25,12 +25,14 @@ static int make_stats_desc_reply(struct ofp_stats_request * req, char * buf, int
- static int parse_set_config(struct ofp_header * msg);
- static int make_config_reply( int xid, char * buf, int buflen);
- static int make_vendor_reply(int xid, char * buf, int buflen);
--static int make_packet_in(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address);
- static int packet_out_is_lldp(struct ofp_packet_out * po);
- static void fakeswitch_handle_write(struct fakeswitch *fs);
- static void fakeswitch_learn_dstmac(struct fakeswitch *fs);
- void fakeswitch_change_status_now (struct fakeswitch *fs, int new_status);
- void fakeswitch_change_status (struct fakeswitch *fs, int new_status);
-+static int make_packet_in_default(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address);
-+static int make_packet_in_igmp(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address);
-+static int (*make_packet_in)(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address);
- 
- static struct ofp_switch_config Switch_config = {
- 	.header = { 	OFP_VERSION,
-@@ -51,7 +53,7 @@ static inline uint64_t ntohll(uint64_t n)
-     return htonl(1) == 1 ? n : ((uint64_t) ntohl(n) << 32) | ntohl(n >> 32);
- }
- 
--void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int debug, int delay, enum test_mode mode, int total_mac_addresses, int learn_dstmac)
-+void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int debug, int delay, enum test_mode mode, int total_mac_addresses, int learn_dstmac, int igmp_test)
- {
-     char buf[BUFLEN];
-     struct ofp_header ofph;
-@@ -62,6 +64,8 @@ void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int
-     fs->outbuf = msgbuf_new(bufsize);
-     fs->probe_state = 0;
-     fs->mode = mode;
-+    fs->igmp_test = igmp_test;
-+    make_packet_in = igmp_test ? make_packet_in_igmp : make_packet_in_default;
-     fs->probe_size = make_packet_in(fs->id, 0, 0, buf, BUFLEN, fs->current_mac_address++);
-     fs->count = 0;
-     fs->switch_status = START;
-@@ -71,7 +75,6 @@ void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int
-     fs->xid = 1;
-     fs->learn_dstmac = learn_dstmac;
-     fs->current_buffer_id = 1;
--  
-     ofph.version = OFP_VERSION;
-     ofph.type = OFPT_HELLO;
-     ofph.length = htons(sizeof(ofph));
-@@ -289,8 +292,54 @@ static int packet_out_is_lldp(struct ofp_packet_out * po){
- 	return ethertype == ETHERTYPE_LLDP;
- }
- 
-+static int make_packet_in_igmp(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address)
-+{
-+    struct ofp_packet_in * pi;
-+    struct ether_header * eth;
-+    static char fake_igmp_join[] = {
-+        0x97,0x0a,0x00,0x4c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,
-+        0x01,0x00,0x40,0x00,0x01,0x00,0x00,0x80,0x00,0x00,0x00,
-+        0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x02,0x08,0x00,0x46,
-+        0xc0,0x00,0x2c,0x00,0x01,0x00,0x00,0x01,0x02,0x3f,0x04,
-+        0x01,0x02,0x03,0x04,0xe0,0x00,0x01,0x01,0x94,0x04,0x00,
-+        0x00,0x22,0x00,0xf6,0xf5,0x00,0x00,0x00,0x01,0x01,0x00,
-+        0x00,0x01,0xe2,0x00,0x00,0x01,0x01,0x02,0x03,0x04,
-+    };
-+    static char fake_igmp_leave[] = {
-+        0x97,0x0a,0x00,0x4c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,
-+        0x01,0x00,0x40,0x00,0x01,0x00,0x00,0x80,0x00,0x00,0x00,
-+        0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x02,0x08,0x00,0x46,
-+        0xc0,0x00,0x2c,0x00,0x01,0x00,0x00,0x01,0x02,0x3f,0x04,
-+        0x01,0x02,0x03,0x04,0xe0,0x00,0x01,0x01,0x94,0x04,0x00,
-+        0x00,0x22,0x00,0xf5,0xf5,0x00,0x00,0x00,0x01,0x02,0x00,
-+        0x00,0x01,0xe2,0x00,0x00,0x01,0x01,0x02,0x03,0x04,
-+    };
-+    static char *fake_bufs[2] = { fake_igmp_join, fake_igmp_leave };
-+    static int fake_size_map[2] = { (int)sizeof(fake_igmp_join), (int)sizeof(fake_igmp_leave) };
-+    static int idx;
-+    int cur_idx = idx;
-+    int buf_size = fake_size_map[cur_idx];
-+    char *fake;
-+    fake = fake_bufs[cur_idx];
-+    idx ^= 1;
-+    assert(buflen > buf_size);
-+    memcpy(buf, fake, buf_size);
-+    pi = (struct ofp_packet_in *) buf;
-+    pi->header.version = OFP_VERSION;
-+    pi->header.xid = htonl(xid);
-+    pi->buffer_id = htonl(buffer_id);
-+    eth = (struct ether_header * ) pi->data;
-+    // copy into src mac addr; only 4 bytes, but should suffice to not confuse
-+    // the controller; don't overwrite first byte
-+    memcpy(&eth->ether_shost[1], &mac_address, sizeof(mac_address));
-+    // mark this as coming from us, mostly for debug
-+    eth->ether_dhost[5] = switch_id;
-+    eth->ether_shost[5] = switch_id;
-+    return buf_size;
-+}
-+
- /***********************************************************************/
--static int make_packet_in(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address)
-+static int make_packet_in_default(int switch_id, int xid, int buffer_id, char * buf, int buflen, int mac_address)
- {
-     struct ofp_packet_in * pi;
-     struct ether_header * eth;
-@@ -387,6 +436,7 @@ void fakeswitch_handle_read(struct fakeswitch *fs)
-                 if(fs->switch_status == READY_TO_SEND && (fm->command == htons(OFPFC_ADD) || 
-                         fm->command == htons(OFPFC_MODIFY_STRICT)))
-                 {
-+                    debug_msg(fs, "Got FLOW MOD response\n");
-                     fs->count++;        // got response to what we went
-                     fs->probe_state--;
-                 }
-@@ -488,6 +538,7 @@ static void fakeswitch_handle_write(struct fakeswitch *fs)
-         else if ((fs->mode == MODE_THROUGHPUT) && 
-                 (msgbuf_count_buffered(fs->outbuf) < throughput_buffer))  // keep buffer full
-             send_count = (throughput_buffer - msgbuf_count_buffered(fs->outbuf)) / fs->probe_size;
-+
-         for (i = 0; i < send_count; i++)
-         {
-             // queue up packet
-diff --git a/cbench/fakeswitch.h b/cbench/fakeswitch.h
-index d0352e7..26eb202 100644
---- a/cbench/fakeswitch.h
-+++ b/cbench/fakeswitch.h
-@@ -39,6 +39,7 @@ struct fakeswitch
-     int current_mac_address;
-     int learn_dstmac;
-     int current_buffer_id;
-+    int igmp_test;
- };
- 
- /*** Initialize an already allocated fakeswitch
-@@ -54,7 +55,7 @@ struct fakeswitch
-  * @param total_mac_addresses      The total number of unique mac addresses
-  *                                 to use for packet ins from this switch
-  */
--void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int debug, int delay, enum test_mode mode, int total_mac_addresses, int learn_dstmac);
-+void fakeswitch_init(struct fakeswitch *fs, int dpid, int sock, int bufsize, int debug, int delay, enum test_mode mode, int total_mac_addresses, int learn_dstmac, int igmp_test);
- 
- 
- /*** Set the desired flags for poll()
diff --git a/src/test/cbench/cbenchTest.py b/src/test/cbench/cbenchTest.py
deleted file mode 100644
index c7ddd1f..0000000
--- a/src/test/cbench/cbenchTest.py
+++ /dev/null
@@ -1,103 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import time
-import os
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from OnosCtrl import OnosCtrl
-from CordTestUtils import log_test as log
-
-log.setLevel('INFO')
-
-class cbench_exchange(unittest.TestCase):
-
-    igmp_app_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../apps',
-                                 'ciena-cordigmp-cbench-1.0-SNAPSHOT.oar')
-    igmp_app_file_default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../apps',
-                                         'ciena-cordigmp-2.0-SNAPSHOT.oar')
-    igmp_app = 'org.ciena.cordigmp'
-    switch_script = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup', 'of-bridge.sh')
-    switch = 'br-int'
-    ctlr_ip = os.getenv('ONOS_CONTROLLER_IP', 'localhost')
-    ctlr_port = '6653'
-    cbench = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cbench')
-    cbench_igmp_options = '-g -D 3000 -w 10 -c {} -p {}'.format(ctlr_ip, ctlr_port)
-    CBENCH_TIMEOUT = 60
-
-    @classmethod
-    def setUpClass(cls):
-        cls.stop_switch()
-        cls.install_app()
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.install_app_default()
-        cls.start_switch()
-
-    @classmethod
-    def install_app(cls):
-        OnosCtrl.uninstall_app(cls.igmp_app)
-        time.sleep(2)
-        OnosCtrl.install_app(cls.igmp_app_file)
-        time.sleep(3)
-
-    @classmethod
-    def install_app_default(cls):
-        OnosCtrl.uninstall_app(cls.igmp_app)
-        time.sleep(2)
-        OnosCtrl.install_app(cls.igmp_app_file_default)
-
-    @classmethod
-    def stop_switch(cls):
-        cmd = 'service openvswitch-switch stop'
-        log.info('Stopping switch before running cbench fakeswitch tests')
-        os.system(cmd)
-        time.sleep(1)
-
-    @classmethod
-    def start_switch(cls):
-        cmd = '{} {}'.format(cls.switch_script, cls.switch)
-        log.info('Starting back switch with command: \"%s\"', cmd)
-        os.system(cmd)
-        time.sleep(3)
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_cbench_igmp(self):
-        df = defer.Deferred()
-        def cbench_igmp_join_leave_loop(df):
-            cmd = '{} {} -l 20 -s 1 -m 1000'.format(self.cbench, self.cbench_igmp_options)
-            os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, cbench_igmp_join_leave_loop, df)
-        return df
diff --git a/src/test/cli/__init__.py b/src/test/cli/__init__.py
deleted file mode 100644
index 064ffcf..0000000
--- a/src/test/cli/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-__path__.append(utils_dir)
diff --git a/src/test/cli/ast.py b/src/test/cli/ast.py
deleted file mode 100644
index 7ea9b8a..0000000
--- a/src/test/cli/ast.py
+++ /dev/null
@@ -1,327 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# -*- coding: utf-8 -*-
-"""
-    ast
-    ~~~
-
-    The `ast` module helps Python applications to process trees of the Python
-    abstract syntax grammar.  The abstract syntax itself might change with
-    each Python release; this module helps to find out programmatically what
-    the current grammar looks like and allows modifications of it.
-
-    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
-    a flag to the `compile()` builtin function or by using the `parse()`
-    function from this module.  The result will be a tree of objects whose
-    classes all inherit from `ast.AST`.
-
-    A modified abstract syntax tree can be compiled into a Python code object
-    using the built-in `compile()` function.
-
-    Additionally various helper functions are provided that make working with
-    the trees simpler.  The main intention of the helper functions and this
-    module in general is to provide an easy to use interface for libraries
-    that work tightly with the python syntax (template engines for example).
-
-
-    :copyright: Copyright 2008 by Armin Ronacher.
-    :license: Python License.
-"""
-from _ast import *
-from _ast import __version__
-
-
-def parse(source, filename='<unknown>', mode='exec'):
-    """
-    Parse the source into an AST node.
-    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
-    """
-    return compile(source, filename, mode, PyCF_ONLY_AST)
-
-
-def literal_eval(node_or_string):
-    """
-    Safely evaluate an expression node or a string containing a Python
-    expression.  The string or node provided may only consist of the following
-    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
-    and None.
-    """
-    _safe_names = {'None': None, 'True': True, 'False': False}
-    if isinstance(node_or_string, basestring):
-        node_or_string = parse(node_or_string, mode='eval')
-    if isinstance(node_or_string, Expression):
-        node_or_string = node_or_string.body
-    def _convert(node):
-        if isinstance(node, Str):
-            return node.s
-        elif isinstance(node, Num):
-            return node.n
-        elif isinstance(node, Tuple):
-            return tuple(map(_convert, node.elts))
-        elif isinstance(node, List):
-            return list(map(_convert, node.elts))
-        elif isinstance(node, Dict):
-            return dict((_convert(k), _convert(v)) for k, v
-                        in zip(node.keys, node.values))
-        elif isinstance(node, Name):
-            if node.id in _safe_names:
-                return _safe_names[node.id]
-        elif isinstance(node, BinOp) and \
-             isinstance(node.op, (Add, Sub)) and \
-             isinstance(node.right, Num) and \
-             isinstance(node.right.n, complex) and \
-             isinstance(node.left, Num) and \
-             isinstance(node.left.n, (int, long, float)):
-            left = node.left.n
-            right = node.right.n
-            if isinstance(node.op, Add):
-                return left + right
-            else:
-                return left - right
-        raise ValueError('malformed string')
-    return _convert(node_or_string)
-
-
-def dump(node, annotate_fields=True, include_attributes=False):
-    """
-    Return a formatted dump of the tree in *node*.  This is mainly useful for
-    debugging purposes.  The returned string will show the names and the values
-    for fields.  This makes the code impossible to evaluate, so if evaluation is
-    wanted *annotate_fields* must be set to False.  Attributes such as line
-    numbers and column offsets are not dumped by default.  If this is wanted,
-    *include_attributes* can be set to True.
-    """
-    def _format(node):
-        if isinstance(node, AST):
-            fields = [(a, _format(b)) for a, b in iter_fields(node)]
-            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
-                ('%s=%s' % field for field in fields)
-                if annotate_fields else
-                (b for a, b in fields)
-            ))
-            if include_attributes and node._attributes:
-                rv += fields and ', ' or ' '
-                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
-                                for a in node._attributes)
-            return rv + ')'
-        elif isinstance(node, list):
-            return '[%s]' % ', '.join(_format(x) for x in node)
-        return repr(node)
-    if not isinstance(node, AST):
-        raise TypeError('expected AST, got %r' % node.__class__.__name__)
-    return _format(node)
-
-
-def copy_location(new_node, old_node):
-    """
-    Copy source location (`lineno` and `col_offset` attributes) from
-    *old_node* to *new_node* if possible, and return *new_node*.
-    """
-    for attr in 'lineno', 'col_offset':
-        if attr in old_node._attributes and attr in new_node._attributes \
-           and hasattr(old_node, attr):
-            setattr(new_node, attr, getattr(old_node, attr))
-    return new_node
-
-
-def fix_missing_locations(node):
-    """
-    When you compile a node tree with compile(), the compiler expects lineno and
-    col_offset attributes for every node that supports them.  This is rather
-    tedious to fill in for generated nodes, so this helper adds these attributes
-    recursively where not already set, by setting them to the values of the
-    parent node.  It works recursively starting at *node*.
-    """
-    def _fix(node, lineno, col_offset):
-        if 'lineno' in node._attributes:
-            if not hasattr(node, 'lineno'):
-                node.lineno = lineno
-            else:
-                lineno = node.lineno
-        if 'col_offset' in node._attributes:
-            if not hasattr(node, 'col_offset'):
-                node.col_offset = col_offset
-            else:
-                col_offset = node.col_offset
-        for child in iter_child_nodes(node):
-            _fix(child, lineno, col_offset)
-    _fix(node, 1, 0)
-    return node
-
-
-def increment_lineno(node, n=1):
-    """
-    Increment the line number of each node in the tree starting at *node* by *n*.
-    This is useful to "move code" to a different location in a file.
-    """
-    for child in walk(node):
-        if 'lineno' in child._attributes:
-            child.lineno = getattr(child, 'lineno', 0) + n
-    return node
-
-
-def iter_fields(node):
-    """
-    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
-    that is present on *node*.
-    """
-    for field in node._fields:
-        try:
-            yield field, getattr(node, field)
-        except AttributeError:
-            pass
-
-
-def iter_child_nodes(node):
-    """
-    Yield all direct child nodes of *node*, that is, all fields that are nodes
-    and all items of fields that are lists of nodes.
-    """
-    for name, field in iter_fields(node):
-        if isinstance(field, AST):
-            yield field
-        elif isinstance(field, list):
-            for item in field:
-                if isinstance(item, AST):
-                    yield item
-
-
-def get_docstring(node, clean=True):
-    """
-    Return the docstring for the given node or None if no docstring can
-    be found.  If the node provided does not have docstrings a TypeError
-    will be raised.
-    """
-    if not isinstance(node, (FunctionDef, ClassDef, Module)):
-        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
-    if node.body and isinstance(node.body[0], Expr) and \
-       isinstance(node.body[0].value, Str):
-        if clean:
-            import inspect
-            return inspect.cleandoc(node.body[0].value.s)
-        return node.body[0].value.s
-
-
-def walk(node):
-    """
-    Recursively yield all descendant nodes in the tree starting at *node*
-    (including *node* itself), in no specified order.  This is useful if you
-    only want to modify nodes in place and don't care about the context.
-    """
-    from collections import deque
-    todo = deque([node])
-    while todo:
-        node = todo.popleft()
-        todo.extend(iter_child_nodes(node))
-        yield node
-
-
-class NodeVisitor(object):
-    """
-    A node visitor base class that walks the abstract syntax tree and calls a
-    visitor function for every node found.  This function may return a value
-    which is forwarded by the `visit` method.
-
-    This class is meant to be subclassed, with the subclass adding visitor
-    methods.
-
-    Per default the visitor functions for the nodes are ``'visit_'`` +
-    class name of the node.  So a `TryFinally` node visit function would
-    be `visit_TryFinally`.  This behavior can be changed by overriding
-    the `visit` method.  If no visitor function exists for a node
-    (return value `None`) the `generic_visit` visitor is used instead.
-
-    Don't use the `NodeVisitor` if you want to apply changes to nodes during
-    traversing.  For this a special visitor exists (`NodeTransformer`) that
-    allows modifications.
-    """
-
-    def visit(self, node):
-        """Visit a node."""
-        method = 'visit_' + node.__class__.__name__
-        visitor = getattr(self, method, self.generic_visit)
-        return visitor(node)
-
-    def generic_visit(self, node):
-        """Called if no explicit visitor function exists for a node."""
-        for field, value in iter_fields(node):
-            if isinstance(value, list):
-                for item in value:
-                    if isinstance(item, AST):
-                        self.visit(item)
-            elif isinstance(value, AST):
-                self.visit(value)
-
-
-class NodeTransformer(NodeVisitor):
-    """
-    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
-    allows modification of nodes.
-
-    The `NodeTransformer` will walk the AST and use the return value of the
-    visitor methods to replace or remove the old node.  If the return value of
-    the visitor method is ``None``, the node will be removed from its location,
-    otherwise it is replaced with the return value.  The return value may be the
-    original node in which case no replacement takes place.
-
-    Here is an example transformer that rewrites all occurrences of name lookups
-    (``foo``) to ``data['foo']``::
-
-       class RewriteName(NodeTransformer):
-
-           def visit_Name(self, node):
-               return copy_location(Subscript(
-                   value=Name(id='data', ctx=Load()),
-                   slice=Index(value=Str(s=node.id)),
-                   ctx=node.ctx
-               ), node)
-
-    Keep in mind that if the node you're operating on has child nodes you must
-    either transform the child nodes yourself or call the :meth:`generic_visit`
-    method for the node first.
-
-    For nodes that were part of a collection of statements (that applies to all
-    statement nodes), the visitor may also return a list of nodes rather than
-    just a single node.
-
-    Usually you use the transformer like this::
-
-       node = YourTransformer().visit(node)
-    """
-
-    def generic_visit(self, node):
-        for field, old_value in iter_fields(node):
-            old_value = getattr(node, field, None)
-            if isinstance(old_value, list):
-                new_values = []
-                for value in old_value:
-                    if isinstance(value, AST):
-                        value = self.visit(value)
-                        if value is None:
-                            continue
-                        elif not isinstance(value, AST):
-                            new_values.extend(value)
-                            continue
-                    new_values.append(value)
-                old_value[:] = new_values
-            elif isinstance(old_value, AST):
-                new_node = self.visit(old_value)
-                if new_node is None:
-                    delattr(node, field)
-                else:
-                    setattr(node, field, new_node)
-        return node
diff --git a/src/test/cli/clicommon.py b/src/test/cli/clicommon.py
deleted file mode 100644
index 834db7e..0000000
--- a/src/test/cli/clicommon.py
+++ /dev/null
@@ -1,52 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-from utilities import Utilities, utilities
-from CordTestUtils import log_test as log
-
-#log.setLevel('INFO')
-class MAIN(object):
-    def __init__(self):
-        global utilities
-        self.log = log
-        self.logdir = os.getenv('HOME')
-        self.logHeader = ''
-        self.utilities = utilities
-        self.TRUE = True
-        self.FALSE = False
-        self.EXPERIMENTAL_MODE = self.FALSE
-
-    def cleanup(self): pass
-
-    def exit(self): pass
-
-main = MAIN()
diff --git a/src/test/cli/clidriver.py b/src/test/cli/clidriver.py
deleted file mode 100644
index e4baad0..0000000
--- a/src/test/cli/clidriver.py
+++ /dev/null
@@ -1,386 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-"""
-Created on 24-Oct-2012
-
-author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
-          Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
-
-
-    TestON is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
-
-    TestON is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
-
-
-
-"""
-import pexpect
-import re
-from component import Component
-from clicommon import *
-import os
-
-class CLI( Component ):
-
-    """
-        This will define common functions for CLI included.
-    """
-    def __init__( self ):
-        super( Component, self ).__init__()
-
-    def connect( self, **connectargs ):
-        """
-           Connection will establish to the remote host using ssh.
-           It will take user_name ,ip_address and password as arguments<br>
-           and will return the handle.
-        """
-        for key in connectargs:
-            vars( self )[ key ] = connectargs[ key ]
-
-        connect_result = super( CLI, self ).connect()
-        ssh_newkey = 'Are you sure you want to continue connecting'
-        refused = "ssh: connect to host " + \
-            self.ip_address + " port {}: Connection refused".format(self.port)
-        if self.port:
-            if os.getuid() == 0:
-                ssh_hosts_file = '/root/.ssh/known_hosts'
-            else:
-                ssh_hosts_file = os.path.join(os.getenv('HOME'), '.ssh', 'known_hosts')
-            cmd_host_remove = 'ssh-keygen -f "%s" -R [%s]:8101 2>/dev/null' %(ssh_hosts_file, self.ip_address)
-            os.system(cmd_host_remove)
-            #main.log.info('SSH host remove cmd: %s' %cmd_host_remove)
-            main.log.info('Spawning pexpect for ip %s' %self.ip_address)
-            self.handle = pexpect.spawn(
-                'ssh -p ' +
-                self.port +
-                ' ' +
-                '-o StrictHostKeyChecking=no ' +
-                self.user_name +
-                '@' +
-                self.ip_address,
-                env={ "TERM": "xterm-mono" },
-                maxread=50000 )
-        else:
-            self.handle = pexpect.spawn(
-                'ssh -X ' +
-                self.user_name +
-                '@' +
-                self.ip_address,
-                env={ "TERM": "xterm-mono" },
-                maxread=1000000,
-                timeout=60 )
-
-        self.handle.logfile = self.logfile_handler
-        i = 5
-        while i == 5:
-            i = self.handle.expect( [
-                                    ssh_newkey,
-                                    'password:|Password:',
-                                    pexpect.EOF,
-                                    pexpect.TIMEOUT,
-                                    refused,
-                                    'teston>',
-                                    '>|#|\$' ],
-                            120 )
-            if i == 0:  # Accept key, then expect either a password prompt or access
-                main.log.info( "ssh key confirmation received, send yes" )
-                self.handle.sendline( 'yes' )
-                i = 5  # Run the loop again
-                continue
-            if i == 1:  # Password required
-                if self.pwd:
-                    main.log.info(
-                    "ssh connection asked for password, gave password" )
-                else:
-                    main.log.info( "Server asked for password, but none was "
-                                    "given in the .topo file. Trying "
-                                    "no password.")
-                    self.pwd = ""
-                self.handle.sendline( self.pwd )
-                j = self.handle.expect( [
-                                        '>|#|\$',
-                                        'password:|Password:',
-                                        pexpect.EOF,
-                                        pexpect.TIMEOUT ],
-                                        120 )
-                if j != 0:
-                    main.log.error( "Incorrect Password" )
-                    return main.FALSE
-            elif i == 2:
-                main.log.error( "Connection timeout" )
-                return main.FALSE
-            elif i == 3:  # timeout
-                main.log.error(
-                    "No route to the Host " +
-                    self.user_name +
-                    "@" +
-                    self.ip_address )
-                return main.FALSE
-            elif i == 4:
-                main.log.error(
-                    "ssh: connect to host " +
-                    self.ip_address +
-                    " port 22: Connection refused" )
-                return main.FALSE
-            elif i == 6:
-                main.log.info( "Password not required logged in" )
-
-        self.handle.sendline( "" )
-        self.handle.expect( '>|#|\$' )
-        return self.handle
-
-    def disconnect( self ):
-        result = super( CLI, self ).disconnect( self )
-        result = main.TRUE
-        # self.execute( cmd="exit",timeout=120,prompt="(.*)" )
-
-    def execute( self, **execparams ):
-        """
-        It facilitates the command line execution of a given command. It has arguments as :
-        cmd => represents command to be executed,
-        prompt => represents expect command prompt or output,
-        timeout => timeout for command execution,
-        more => to provide a key press if it is on.
-
-        It will return output of command exection.
-        """
-        result = super( CLI, self ).execute( self )
-        defaultPrompt = '.*[$>\#]'
-        args = utilities.parse_args( [ "CMD",
-                                       "TIMEOUT",
-                                       "PROMPT",
-                                       "MORE" ],
-                                     **execparams )
-
-        expectPrompt = args[ "PROMPT" ] if args[ "PROMPT" ] else defaultPrompt
-        self.LASTRSP = ""
-        timeoutVar = args[ "TIMEOUT" ] if args[ "TIMEOUT" ] else 10
-        cmd = ''
-        if args[ "CMD" ]:
-            cmd = args[ "CMD" ]
-        else:
-            return 0
-        if args[ "MORE" ] is None:
-            args[ "MORE" ] = " "
-        self.handle.sendline( cmd )
-        self.lastCommand = cmd
-        index = self.handle.expect( [ expectPrompt,
-                                      "--More--",
-                                      'Command not found.',
-                                      pexpect.TIMEOUT,
-                                      "^:$" ],
-                                    timeout=timeoutVar )
-        if index == 0:
-            self.LASTRSP = self.LASTRSP + \
-                self.handle.before + self.handle.after
-            main.log.info( "Executed :" + str(cmd ) +
-                           " \t\t Expected Prompt '" + str( expectPrompt) +
-                           "' Found" )
-        elif index == 1:
-            self.LASTRSP = self.LASTRSP + self.handle.before
-            self.handle.send( args[ "MORE" ] )
-            main.log.info(
-                "Found More screen to go , Sending a key to proceed" )
-            indexMore = self.handle.expect(
-                [ "--More--", expectPrompt ], timeout=timeoutVar )
-            while indexMore == 0:
-                main.log.info(
-                    "Found anoother More screen to go , Sending a key to proceed" )
-                self.handle.send( args[ "MORE" ] )
-                indexMore = self.handle.expect(
-                    [ "--More--", expectPrompt ], timeout=timeoutVar )
-                self.LASTRSP = self.LASTRSP + self.handle.before
-        elif index == 2:
-            main.log.error( "Command not found" )
-            self.LASTRSP = self.LASTRSP + self.handle.before
-        elif index == 3:
-            main.log.error( "Expected Prompt not found, Time Out!!" )
-            main.log.error( expectPrompt )
-            self.LASTRSP = self.LASTRSP + self.handle.before
-            return self.LASTRSP
-        elif index == 4:
-            self.LASTRSP = self.LASTRSP + self.handle.before
-            # self.handle.send( args[ "MORE" ] )
-            self.handle.sendcontrol( "D" )
-            main.log.info(
-                "Found More screen to go, Sending a key to proceed" )
-            indexMore = self.handle.expect(
-                [ "^:$", expectPrompt ], timeout=timeoutVar )
-            while indexMore == 0:
-                main.log.info(
-                    "Found another More screen to go, Sending a key to proceed" )
-                self.handle.sendcontrol( "D" )
-                indexMore = self.handle.expect(
-                    [ "^:$", expectPrompt ], timeout=timeoutVar )
-                self.LASTRSP = self.LASTRSP + self.handle.before
-        main.last_response = self.remove_contol_chars( self.LASTRSP )
-        return self.LASTRSP
-
-    def remove_contol_chars( self, response ):
-        # RE_XML_ILLEGAL = '([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])'%( unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ),unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ),unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ) )
-        # response = re.sub( RE_XML_ILLEGAL, "\n", response )
-        response = re.sub( r"[\x01-\x1F\x7F]", "", response )
-        # response = re.sub( r"\[\d+\;1H", "\n", response )
-        response = re.sub( r"\[\d+\;\d+H", "", response )
-        return response
-
-    def runAsSudoUser( self, handle, pwd, default ):
-
-        i = handle.expect( [ ".ssword:*", default, pexpect.EOF ] )
-        if i == 0:
-            handle.sendline( pwd )
-            handle.sendline( "\n" )
-
-        if i == 1:
-            handle.expect( default )
-
-        if i == 2:
-            main.log.error( "Unable to run as Sudo user" )
-
-        return handle
-
-    def onfail( self ):
-        if 'onfail' in main.componentDictionary[ self.name ]:
-            commandList = main.componentDictionary[
-                self.name ][ 'onfail' ].split( "," )
-            for command in commandList:
-                response = self.execute(
-                    cmd=command,
-                    prompt="(.*)",
-                    timeout=120 )
-
-    def secureCopy( self, userName, ipAddress, filePath, dstPath, pwd="",
-                    direction="from" ):
-        """
-        Definition:
-            Execute scp command in linux to copy to/from a remote host
-        Required:
-            str userName - User name of the remote host
-            str ipAddress - IP address of the remote host
-            str filePath - File path including the file it self
-            str dstPath - Destination path
-        Optional:
-            str pwd - Password of the host
-            str direction - Direction of the scp, default to "from" which means
-                            copy "from" the remote machine to local machine,
-                            while "to" means copy "to" the remote machine from
-                            local machine
-        """
-        returnVal = main.TRUE
-        ssh_newkey = 'Are you sure you want to continue connecting'
-        refused = "ssh: connect to host " + \
-                  ipAddress + " port 22: Connection refused"
-
-        if direction == "from":
-            cmd = 'scp ' + str( userName ) + '@' + str( ipAddress ) + ':' + \
-                  str( filePath ) + ' ' + str( dstPath )
-        elif direction == "to":
-            cmd = 'scp ' + str( filePath ) + ' ' + str( userName ) + \
-                  '@' + str( ipAddress ) + ':' + str( dstPath )
-        else:
-            main.log.debug( "Wrong direction using secure copy command!" )
-            return main.FALSE
-
-        main.log.info( "Sending: " + cmd )
-        self.handle.sendline( cmd )
-        i = 0
-        while i < 2:
-            i = self.handle.expect( [
-                                ssh_newkey,
-                                'password:',
-                                "100%",
-                                refused,
-                                "No such file or directory",
-                                pexpect.EOF,
-                                pexpect.TIMEOUT ],
-                                120 )
-            if i == 0:  # ask for ssh key confirmation
-                main.log.info( "ssh key confirmation received, sending yes" )
-                self.handle.sendline( 'yes' )
-            elif i == 1:  # Asked for ssh password
-                main.log.info( "ssh connection asked for password, gave password" )
-                self.handle.sendline( pwd )
-            elif i == 2:  # File finished transfering
-                main.log.info( "Secure copy successful" )
-                returnVal = main.TRUE
-            elif i == 3:  # Connection refused
-                main.log.error(
-                    "ssh: connect to host " +
-                    ipAddress +
-                    " port 22: Connection refused" )
-                returnVal = main.FALSE
-            elif i == 4:  # File Not found
-                main.log.error( "No such file found" )
-                returnVal = main.FALSE
-            elif i == 5:  # EOF
-                main.log.error( "Pexpect.EOF found!!!" )
-                main.cleanup()
-                main.exit()
-            elif i == 6:  # timeout
-                main.log.error(
-                    "No route to the Host " +
-                    userName +
-                    "@" +
-                    ipAddress )
-                returnVal = main.FALSE
-        self.handle.expect( "\$" )
-        return returnVal
-
-    def scp( self, remoteHost, filePath, dstPath, direction="from" ):
-        """
-        Definition:
-            Execute scp command in linux to copy to/from a remote host
-        Required:
-            * remoteHost - Test ON component to be parsed
-            str filePath - File path including the file it self
-            str dstPath - Destination path
-        Optional:
-            str direction - Direction of the scp, default to "from" which means
-                            copy "from" the remote machine to local machine,
-                            while "to" means copy "to" the remote machine from
-                            local machine
-        """
-        return self.secureCopy( remoteHost.user_name,
-                                remoteHost.ip_address,
-                                filePath,
-                                dstPath,
-                                pwd=remoteHost.pwd,
-                                direction=direction )
diff --git a/src/test/cli/component.py b/src/test/cli/component.py
deleted file mode 100644
index 1c5b0f1..0000000
--- a/src/test/cli/component.py
+++ /dev/null
@@ -1,157 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-"""
-Created on 24-Oct-2012
-
-author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
-          Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
-
-
-    TestON is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 2 of the License, or
-    ( at your option ) any later version.
-
-    TestON is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
-
-
-
-"""
-import logging
-from clicommon import *
-
-class Component( object ):
-
-    """
-    This is the tempalte class for components
-    """
-    def __init__( self ):
-        self.default = ''
-        self.wrapped = sys.modules[ __name__ ]
-        self.count = 0
-
-    def __getattr__( self, name ):
-        """
-         This will invoke, if the attribute wasn't found the usual ways.
-         Here it will look for assert_attribute and will execute when
-         AttributeError occurs.
-         It will return the result of the assert_attribute.
-        """
-        try:
-            return getattr( self.wrapped, name )
-        except AttributeError as error:
-            # NOTE: The first time we load a driver module we get this error
-            if "'module' object has no attribute '__path__'" in error:
-                pass
-            else:
-                main.log.error( str(error.__class__) + " " + str(error) )
-            try:
-                def experimentHandling( *args, **kwargs ):
-                    if main.EXPERIMENTAL_MODE == main.TRUE:
-                        result = self.experimentRun( *args, **kwargs )
-                        main.log.info( "EXPERIMENTAL MODE. API " +
-                                       str( name ) +
-                                       " not yet implemented. " +
-                                       "Returning dummy values" )
-                        return result
-                    else:
-                        return main.FALSE
-                return experimentHandling
-            except TypeError as e:
-                main.log.error( "Arguments for experimental mode does not" +
-                                " have key 'retruns'" + e )
-
-    def connect( self ):
-
-        vars( main )[ self.name + 'log' ] = logging.getLogger( self.name )
-
-        session_file = main.logdir + "/" + self.name + ".session"
-        self.log_handler = logging.FileHandler( session_file )
-        self.log_handler.setLevel( logging.DEBUG )
-
-        vars( main )[ self.name + 'log' ].setLevel( logging.DEBUG )
-        _formatter = logging.Formatter(
-            "%(asctime)s  %(name)-10s: %(levelname)-8s: %(message)s" )
-        self.log_handler.setFormatter( _formatter )
-        vars( main )[ self.name + 'log' ].addHandler( self.log_handler )
-        # Adding header for the component log
-        vars( main )[ self.name + 'log' ].info( main.logHeader )
-        # Opening the session log to append command's execution output
-        self.logfile_handler = open( session_file, "w" )
-
-        return "Dummy"
-
-    def execute( self, cmd ):
-        return main.TRUE
-        # import commands
-        # return commands.getoutput( cmd )
-
-    def disconnect( self ):
-        return main.TRUE
-
-    def config( self ):
-        self = self
-        # Need to update the configuration code
-
-    def cleanup( self ):
-        return main.TRUE
-
-    def log( self, message ):
-        """
-        Here finding the for the component to which the
-        log message based on the called child object.
-        """
-        vars( main )[ self.name + 'log' ].info( "\n" + message + "\n" )
-
-    def close_log_handles( self ):
-        vars( main )[ self.name + 'log' ].removeHandler( self.log_handler )
-        if self.logfile_handler:
-            self.logfile_handler.close()
-
-    def get_version( self ):
-        return "Version unknown"
-
-    def experimentRun( self, *args, **kwargs ):
-        # FIXME handle *args
-        args = utilities.parse_args( [ "RETURNS" ], **kwargs )
-        return args[ "RETURNS" ]
-
-
-if __name__ != "__main__":
-    import sys
-    sys.modules[ __name__ ] = Component()
diff --git a/src/test/cli/onosclidriver.py b/src/test/cli/onosclidriver.py
deleted file mode 100644
index ca35427..0000000
--- a/src/test/cli/onosclidriver.py
+++ /dev/null
@@ -1,4838 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-This driver enters the onos> prompt to issue commands.
-
-Please follow the coding style demonstrated by existing
-functions and document properly.
-
-If you are a contributor to the driver, please
-list your email here for future contact:
-
-jhall@onlab.us
-andrew@onlab.us
-shreya@onlab.us
-
-OCT 13 2014
-
-"""
-import pexpect
-import re
-import json
-import types
-import time
-import os
-from clidriver import CLI
-from clicommon import *
-
-class OnosCliDriver( CLI ):
-
-    def __init__( self, controller = None, connect = True):
-        """
-        Initialize client
-        """
-        self.name = None
-        self.home = None
-        self.handle = None
-	if controller is not None:
-            self.controller = controller
-	else:
-            self.controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
-            self.controller = self.controller.split(',')[0]
-        super( CLI, self ).__init__()
-        if connect == True:
-            self.connect_cli()
-
-    def connect_cli(self):
-        options = { 'name' : 'onoscli', 'onosIp': '{0}'.format(self.controller) }
-        main.log.info('Connecting to controller at %s' %self.controller)
-        self.connect(name = options['name'], user_name = 'onos', pwd = 'rocks',
-                     ip_address = self.controller, port = '8101', options = options)
-
-    def connect( self, **connectargs ):
-        """
-        Creates ssh handle for ONOS cli.
-        """
-        try:
-            for key in connectargs:
-                vars( self )[ key ] = connectargs[ key ]
-            self.home = "~/onos"
-            for key in self.options:
-                if key == "home":
-                    self.home = self.options[ 'home' ]
-                    break
-            if self.home is None or self.home == "":
-                self.home = "~/onos"
-
-            for key in self.options:
-                if key == 'onosIp':
-                    self.onosIp = self.options[ 'onosIp' ]
-                    break
-
-            self.name = self.options[ 'name' ]
-
-            try:
-                if os.getenv( str( self.ip_address ) ) is not None:
-                    self.ip_address = os.getenv( str( self.ip_address ) )
-                else:
-                    main.log.info( self.name +
-                                   ": Trying to connect to " +
-                                   self.ip_address )
-
-            except KeyError:
-                main.log.info( "Invalid host name," +
-                               " connecting to local host instead" )
-                self.ip_address = 'localhost'
-            except Exception as inst:
-                main.log.error( "Uncaught exception: " + str( inst ) )
-
-            self.handle = super( OnosCliDriver, self ).connect(
-                user_name=self.user_name,
-                ip_address=self.ip_address,
-                port=self.port,
-                pwd=self.pwd,
-                home=self.home )
-
-            #self.handle.sendline( "cd " + self.home )
-            #self.handle.expect( "\$" )
-            if self.handle:
-                return self.handle
-            else:
-                main.log.info( "NO ONOS HANDLE" )
-                return main.FALSE
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def disconnect( self ):
-        """
-        Called when Test is complete to disconnect the ONOS handle.
-        """
-        response = main.TRUE
-        try:
-            if self.handle:
-                i = self.logout()
-                if i == main.TRUE:
-                    self.handle.sendline( "" )
-                    self.handle.expect( "\$" )
-                    self.handle.sendline( "exit" )
-                    self.handle.expect( "closed" )
-                self.close_log_handles()
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            response = main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":     " + self.handle.before )
-        except ValueError:
-            main.log.exception( "Exception in disconnect of " + self.name )
-            response = main.TRUE
-        except Exception:
-            main.log.exception( self.name + ": Connection failed to the host" )
-            response = main.FALSE
-        return response
-
-    def logout( self ):
-        """
-        Sends 'logout' command to ONOS cli
-        Returns main.TRUE if exited CLI and
-                main.FALSE on timeout (not guranteed you are disconnected)
-                None on TypeError
-                Exits test on unknown error or pexpect exits unexpectedly
-        """
-        try:
-            if self.handle:
-                self.handle.sendline( "" )
-                i = self.handle.expect( [ "onos>", "\$", pexpect.TIMEOUT ],
-                                        timeout=10 )
-                if i == 0:  # In ONOS CLI
-                    self.handle.sendline( "logout" )
-                    j = self.handle.expect( [ "\$",
-                                              "Command not found:",
-                                              pexpect.TIMEOUT ] )
-                    if j == 0:  # Successfully logged out
-                        return main.TRUE
-                    elif j == 1 or j == 2:
-                        # ONOS didn't fully load, and logout command isn't working
-                        # or the command timed out
-                        self.handle.send( "\x04" )  # send ctrl-d
-                        self.handle.expect( "\$" )
-                        return main.TRUE
-                    else: # some other output
-                        main.log.warn( "Unknown repsonse to logout command: '{}'",
-                                       repr( self.handle.before ) )
-                        return main.FALSE
-                elif i == 1:  # not in CLI
-                    return main.TRUE
-                elif i == 3:  # Timeout
-                    return main.FALSE
-            else:
-                return main.TRUE
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": eof exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except ValueError:
-            main.log.error( self.name +
-                            "ValueError exception in logout method" )
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setCell( self, cellname ):
-        """
-        Calls 'cell <name>' to set the environment variables on ONOSbench
-
-        Before issuing any cli commands, set the environment variable first.
-        """
-        try:
-            if not cellname:
-                main.log.error( "Must define cellname" )
-                main.cleanup()
-                main.exit()
-            else:
-                self.handle.sendline( "cell " + str( cellname ) )
-                # Expect the cellname in the ONOSCELL variable.
-                # Note that this variable name is subject to change
-                #   and that this driver will have to change accordingly
-                self.handle.expect(str(cellname))
-                handleBefore = self.handle.before
-                handleAfter = self.handle.after
-                # Get the rest of the handle
-                self.handle.sendline("")
-                self.handle.expect("\$")
-                handleMore = self.handle.before
-
-                main.log.info( "Cell call returned: " + handleBefore +
-                               handleAfter + handleMore )
-
-                return main.TRUE
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": eof exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def startOnosCli( self, ONOSIp, karafTimeout="",
-                      commandlineTimeout=10, onosStartTimeout=60 ):
-        """
-        karafTimeout is an optional argument. karafTimeout value passed
-        by user would be used to set the current karaf shell idle timeout.
-        Note that when ever this property is modified the shell will exit and
-        the subsequent login would reflect new idle timeout.
-        Below is an example to start a session with 60 seconds idle timeout
-        ( input value is in milliseconds ):
-
-        tValue = "60000"
-        main.ONOScli1.startOnosCli( ONOSIp, karafTimeout=tValue )
-
-        Note: karafTimeout is left as str so that this could be read
-        and passed to startOnosCli from PARAMS file as str.
-        """
-        self.onosIp = ONOSIp
-        try:
-            self.handle.sendline( "" )
-            x = self.handle.expect( [
-                "\$", "onos>" ], commandlineTimeout)
-
-            if x == 1:
-                main.log.info( "ONOS cli is already running" )
-                return main.TRUE
-
-            # Wait for onos start ( -w ) and enter onos cli
-            self.handle.sendline( "onos -w " + str( ONOSIp ) )
-            i = self.handle.expect( [
-                "onos>",
-                pexpect.TIMEOUT ], onosStartTimeout )
-
-            if i == 0:
-                main.log.info( str( ONOSIp ) + " CLI Started successfully" )
-                if karafTimeout:
-                    self.handle.sendline(
-                        "config:property-set -p org.apache.karaf.shell\
-                                 sshIdleTimeout " +
-                        karafTimeout )
-                    self.handle.expect( "\$" )
-                    self.handle.sendline( "onos -w " + str( ONOSIp ) )
-                    self.handle.expect( "onos>" )
-                return main.TRUE
-            else:
-                # If failed, send ctrl+c to process and try again
-                main.log.info( "Starting CLI failed. Retrying..." )
-                self.handle.send( "\x03" )
-                self.handle.sendline( "onos -w " + str( ONOSIp ) )
-                i = self.handle.expect( [ "onos>", pexpect.TIMEOUT ],
-                                        timeout=30 )
-                if i == 0:
-                    main.log.info( str( ONOSIp ) + " CLI Started " +
-                                   "successfully after retry attempt" )
-                    if karafTimeout:
-                        self.handle.sendline(
-                            "config:property-set -p org.apache.karaf.shell\
-                                    sshIdleTimeout " +
-                            karafTimeout )
-                        self.handle.expect( "\$" )
-                        self.handle.sendline( "onos -w " + str( ONOSIp ) )
-                        self.handle.expect( "onos>" )
-                    return main.TRUE
-                else:
-                    main.log.error( "Connection to CLI " +
-                                    str( ONOSIp ) + " timeout" )
-                    return main.FALSE
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def logSet( self, level="INFO", app="org.onosproject" ):
-        """
-        Set the logging level to lvl for a specific app
-        returns main.TRUE on success
-        returns main.FALSE if Error occurred
-        if noExit is True, TestON will not exit, but clean up
-        Available level: DEBUG, TRACE, INFO, WARN, ERROR
-        Level defaults to INFO
-        """
-        try:
-            self.handle.sendline( "log:set %s %s" %( level, app ) )
-            self.handle.expect( "onos>" )
-
-            response = self.handle.before
-            if re.search( "Error", response ):
-                return main.FALSE
-            return main.TRUE
-        except pexpect.TIMEOUT:
-            main.log.exception( self.name + ": TIMEOUT exception found" )
-            main.cleanup()
-            main.exit()
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def log( self, cmdStr, level="" ):
-        """
-            log  the commands in the onos CLI.
-            returns main.TRUE on success
-            returns main.FALSE if Error occurred
-            Available level: DEBUG, TRACE, INFO, WARN, ERROR
-            Level defaults to INFO
-        """
-        try:
-            lvlStr = ""
-            if level:
-                lvlStr = "--level=" + level
-
-            self.handle.sendline( "" )
-            i = self.handle.expect( [ "onos>", "\$", pexpect.TIMEOUT ] )
-            if i == 1:
-                main.log.error( self.name + ": onos cli session closed. ")
-                if self.onosIp:
-                    main.log.warn( "Trying to reconnect " + self.onosIp )
-                    reconnectResult = self.startOnosCli( self.onosIp )
-                    if reconnectResult:
-                        main.log.info( self.name + ": onos cli session reconnected." )
-                    else:
-                        main.log.error( self.name + ": reconnection failed." )
-                        main.cleanup()
-                        main.exit()
-                else:
-                    main.cleanup()
-                    main.exit()
-            if i == 2:
-                self.handle.sendline( "" )
-                self.handle.expect( "onos>" )
-            self.handle.sendline( "log:log " + lvlStr + " " + cmdStr )
-            self.handle.expect( "log:log" )
-            self.handle.expect( "onos>" )
-
-            response = self.handle.before
-            if re.search( "Error", response ):
-                return main.FALSE
-            return main.TRUE
-        except pexpect.TIMEOUT:
-            main.log.exception( self.name + ": TIMEOUT exception found" )
-            main.cleanup()
-            main.exit()
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10 ):
-        """
-        Send a completely user specified string to
-        the onos> prompt. Use this function if you have
-        a very specific command to send.
-
-        Warning: There are no sanity checking to commands
-        sent using this method.
-
-        """
-        try:
-            logStr = "\"Sending CLI command: '" + cmdStr + "'\""
-            self.log( logStr )
-            self.handle.sendline( cmdStr )
-            i = self.handle.expect( ["onos>", "\$"], timeout )
-            response = self.handle.before
-            # TODO: do something with i
-            main.log.info( "Command '" + str( cmdStr ) + "' sent to "
-                           + self.name + "." )
-            if debug:
-                main.log.debug( self.name + ": Raw output" )
-                main.log.debug( self.name + ": " + repr( response ) )
-
-            # Remove ANSI color control strings from output
-            ansiEscape = re.compile( r'\x1b[^m]*m' )
-            response = ansiEscape.sub( '', response )
-            if debug:
-                main.log.debug( self.name + ": ansiEscape output" )
-                main.log.debug( self.name + ": " + repr( response ) )
-
-            # Remove extra return chars that get added
-            response = re.sub(  r"\s\r", "", response )
-            if debug:
-                main.log.debug( self.name + ": Removed extra returns " +
-                                "from output" )
-                main.log.debug( self.name + ": " + repr( response ) )
-
-            # Strip excess whitespace
-            response = response.strip()
-            if debug:
-                main.log.debug( self.name + ": parsed and stripped output" )
-                main.log.debug( self.name + ": " + repr( response ) )
-
-            # parse for just the output, remove the cmd from response
-            output = response.split( cmdStr.strip(), 1 )
-            if debug:
-                main.log.debug( self.name + ": split output" )
-                for r in output:
-                    main.log.debug( self.name + ": " + repr( r ) )
-            output = output[1].strip()
-            if showResponse:
-                main.log.info( "Response from ONOS: {}".format( output ) )
-            return output
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ":ONOS timeout" )
-            if debug:
-                main.log.debug( self.handle.before )
-            return None
-        except IndexError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def shutdown( self, timeout = 5):
-        """
-        Shuts down ONOS
-        """
-        try:
-            self.handle.sendline("shutdown now")
-            self.handle.expect("yes/no", timeout = timeout)
-            self.handle.sendline("yes")
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    # IMPORTANT NOTE:
-    # For all cli commands, naming convention should match
-    # the cli command changing 'a:b' with 'aB'.
-    # Ex ) onos:topology > onosTopology
-    #    onos:links    > onosLinks
-    #    feature:list  > featureList
-
-    def addNode( self, nodeId, ONOSIp, tcpPort="" ):
-        """
-        Adds a new cluster node by ID and address information.
-        Required:
-            * nodeId
-            * ONOSIp
-        Optional:
-            * tcpPort
-        """
-        try:
-            cmdStr = "add-node " + str( nodeId ) + " " +\
-                str( ONOSIp ) + " " + str( tcpPort )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding node" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                main.log.info( "Node " + str( ONOSIp ) + " added" )
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def removeNode( self, nodeId ):
-        """
-        Removes a cluster by ID
-        Issues command: 'remove-node [<node-id>]'
-        Required:
-            * nodeId
-        """
-        try:
-
-            cmdStr = "remove-node " + str( nodeId )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in removing node" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def nodes( self, jsonFormat=True):
-        """
-        List the nodes currently visible
-        Issues command: 'nodes'
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "nodes"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def topology( self ):
-        """
-        Definition:
-            Returns the output of topology command.
-        Return:
-            topology = current ONOS topology
-        """
-        try:
-            cmdStr = "topology -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            main.log.info( cmdStr + " returned: " + str( handle ) )
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def deviceRemove( self, deviceId ):
-        """
-        Removes particular device from storage
-
-        TODO: refactor this function
-        """
-        try:
-            cmdStr = "device-remove " + str( deviceId )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in removing device" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def devices( self, jsonFormat=True ):
-        """
-        Lists all infrastructure devices or switches
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "devices"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def balanceMasters( self ):
-        """
-        This balances the devices across all controllers
-        by issuing command: 'onos> onos:balance-masters'
-        If required this could be extended to return devices balanced output.
-        """
-        try:
-            cmdStr = "onos:balance-masters"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in balancing masters" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def checkMasters( self, jsonFormat=True  ):
-        """
-            Returns the output of the masters command.
-            Optional argument:
-                * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "onos:masters"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def checkBalanceMasters( self, jsonFormat=True ):
-        """
-            Uses the master command to check that the devices' leadership
-            is evenly divided
-
-            Dependencies: checkMasters() and summary()
-
-            Returns main.True if the devices are balanced
-            Returns main.False if the devices are unbalanced
-            Exits on Exception
-            Returns None on TypeError
-        """
-        try:
-            summaryOutput = self.summary()
-            totalDevices = json.loads( summaryOutput )[ "devices" ]
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, summaryOutput ) )
-            return None
-        try:
-            totalOwnedDevices = 0
-            mastersOutput = self.checkMasters()
-            masters = json.loads( mastersOutput )
-            first = masters[ 0 ][ "size" ]
-            for master in masters:
-                totalOwnedDevices += master[ "size" ]
-                if master[ "size" ] > first + 1 or master[ "size" ] < first - 1:
-                    main.log.error( "Mastership not balanced" )
-                    main.log.info( "\n" + self.checkMasters( False ) )
-                    return main.FALSE
-            main.log.info( "Mastership balanced between " \
-                            + str( len(masters) ) + " masters" )
-            return main.TRUE
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, mastersOutput ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def links( self, jsonFormat=True ):
-        """
-        Lists all core links
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "links"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def ports( self, jsonFormat=True ):
-        """
-        Lists all ports
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "ports"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def roles( self, jsonFormat=True ):
-        """
-        Lists all devices and the controllers with roles assigned to them
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "roles"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getRole( self, deviceId ):
-        """
-        Given the a string containing the json representation of the "roles"
-        cli command and a partial or whole device id, returns a json object
-        containing the roles output for the first device whose id contains
-        "device_id"
-
-        Returns:
-        A dict of the role assignments for the given device or
-        None if no match
-        """
-        try:
-            if deviceId is None:
-                return None
-            else:
-                rawRoles = self.roles()
-                rolesJson = json.loads( rawRoles )
-                # search json for the device with id then return the device
-                for device in rolesJson:
-                    # print device
-                    if str( deviceId ) in device[ 'id' ]:
-                        return device
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawRoles ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def rolesNotNull( self ):
-        """
-        Iterates through each device and checks if there is a master assigned
-        Returns: main.TRUE if each device has a master
-                 main.FALSE any device has no master
-        """
-        try:
-            rawRoles = self.roles()
-            rolesJson = json.loads( rawRoles )
-            # search json for the device with id then return the device
-            for device in rolesJson:
-                # print device
-                if device[ 'master' ] == "none":
-                    main.log.warn( "Device has no master: " + str( device ) )
-                    return main.FALSE
-            return main.TRUE
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawRoles ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def paths( self, srcId, dstId ):
-        """
-        Returns string of paths, and the cost.
-        Issues command: onos:paths <src> <dst>
-        """
-        try:
-            cmdStr = "onos:paths " + str( srcId ) + " " + str( dstId )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in getting paths" )
-                return ( handle, "Error" )
-            else:
-                path = handle.split( ";" )[ 0 ]
-                cost = handle.split( ";" )[ 1 ]
-                return ( path, cost )
-        except AssertionError:
-            main.log.exception( "" )
-            return ( handle, "Error" )
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return ( handle, "Error" )
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def hosts( self, jsonFormat=True ):
-        """
-        Lists all discovered hosts
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "hosts"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in handle
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in handle
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( handle ) )
-                return None
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getHost( self, mac ):
-        """
-        Return the first host from the hosts api whose 'id' contains 'mac'
-
-        Note: mac must be a colon separated mac address, but could be a
-              partial mac address
-
-        Return None if there is no match
-        """
-        try:
-            if mac is None:
-                return None
-            else:
-                mac = mac
-                rawHosts = self.hosts()
-                hostsJson = json.loads( rawHosts )
-                # search json for the host with mac then return the device
-                for host in hostsJson:
-                    # print "%s in  %s?" % ( mac, host[ 'id' ] )
-                    if not host:
-                        pass
-                    elif mac in host[ 'id' ]:
-                        return host
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawHosts ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getHostsId( self, hostList ):
-        """
-        Obtain list of hosts
-        Issues command: 'onos> hosts'
-
-        Required:
-            * hostList: List of hosts obtained by Mininet
-        IMPORTANT:
-            This function assumes that you started your
-            topology with the option '--mac'.
-            Furthermore, it assumes that value of VLAN is '-1'
-        Description:
-            Converts mininet hosts ( h1, h2, h3... ) into
-            ONOS format ( 00:00:00:00:00:01/-1 , ... )
-        """
-        try:
-            onosHostList = []
-
-            for host in hostList:
-                host = host.replace( "h", "" )
-                hostHex = hex( int( host ) ).zfill( 12 )
-                hostHex = str( hostHex ).replace( 'x', '0' )
-                i = iter( str( hostHex ) )
-                hostHex = ":".join( a + b for a, b in zip( i, i ) )
-                hostHex = hostHex + "/-1"
-                onosHostList.append( hostHex )
-
-            return onosHostList
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addHostIntent( self, hostIdOne, hostIdTwo ):
-        """
-        Required:
-            * hostIdOne: ONOS host id for host1
-            * hostIdTwo: ONOS host id for host2
-        Description:
-            Adds a host-to-host intent ( bidirectional ) by
-            specifying the two hosts.
-        Returns:
-            A string of the intent id or None on Error
-        """
-        try:
-            cmdStr = "add-host-intent " + str( hostIdOne ) +\
-                " " + str( hostIdTwo )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding Host intent" )
-                main.log.debug( "Response from ONOS was: " + repr( handle ) )
-                return None
-            else:
-                main.log.info( "Host intent installed between " +
-                               str( hostIdOne ) + " and " + str( hostIdTwo ) )
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    main.log.debug( "Response from ONOS was: " +
-                                    repr( handle ) )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addOpticalIntent( self, ingressDevice, egressDevice ):
-        """
-        Required:
-            * ingressDevice: device id of ingress device
-            * egressDevice: device id of egress device
-        Optional:
-            TODO: Still needs to be implemented via dev side
-        Description:
-            Adds an optical intent by specifying an ingress and egress device
-        Returns:
-            A string of the intent id or None on error
-        """
-        try:
-            cmdStr = "add-optical-intent " + str( ingressDevice ) +\
-                " " + str( egressDevice )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            # If error, return error message
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding Optical intent" )
-                return None
-            else:
-                main.log.info( "Optical intent installed between " +
-                               str( ingressDevice ) + " and " +
-                               str( egressDevice ) )
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addPointIntent(
-            self,
-            ingressDevice,
-            egressDevice,
-            portIngress="",
-            portEgress="",
-            ethType="",
-            ethSrc="",
-            ethDst="",
-            bandwidth="",
-            lambdaAlloc=False,
-            ipProto="",
-            ipSrc="",
-            ipDst="",
-            tcpSrc="",
-            tcpDst="" ):
-        """
-        Required:
-            * ingressDevice: device id of ingress device
-            * egressDevice: device id of egress device
-        Optional:
-            * ethType: specify ethType
-            * ethSrc: specify ethSrc ( i.e. src mac addr )
-            * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link
-            * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent
-            * ipProto: specify ip protocol
-            * ipSrc: specify ip source address
-            * ipDst: specify ip destination address
-            * tcpSrc: specify tcp source port
-            * tcpDst: specify tcp destination port
-        Description:
-            Adds a point-to-point intent ( uni-directional ) by
-            specifying device id's and optional fields
-        Returns:
-            A string of the intent id or None on error
-
-        NOTE: This function may change depending on the
-              options developers provide for point-to-point
-              intent via cli
-        """
-        try:
-            # If there are no optional arguments
-            if not ethType and not ethSrc and not ethDst\
-                    and not bandwidth and not lambdaAlloc \
-                    and not ipProto and not ipSrc and not ipDst \
-                    and not tcpSrc and not tcpDst:
-                cmd = "add-point-intent"
-
-            else:
-                cmd = "add-point-intent"
-
-                if ethType:
-                    cmd += " --ethType " + str( ethType )
-                if ethSrc:
-                    cmd += " --ethSrc " + str( ethSrc )
-                if ethDst:
-                    cmd += " --ethDst " + str( ethDst )
-                if bandwidth:
-                    cmd += " --bandwidth " + str( bandwidth )
-                if lambdaAlloc:
-                    cmd += " --lambda "
-                if ipProto:
-                    cmd += " --ipProto " + str( ipProto )
-                if ipSrc:
-                    cmd += " --ipSrc " + str( ipSrc )
-                if ipDst:
-                    cmd += " --ipDst " + str( ipDst )
-                if tcpSrc:
-                    cmd += " --tcpSrc " + str( tcpSrc )
-                if tcpDst:
-                    cmd += " --tcpDst " + str( tcpDst )
-
-            # Check whether the user appended the port
-            # or provided it as an input
-            if "/" in ingressDevice:
-                cmd += " " + str( ingressDevice )
-            else:
-                if not portIngress:
-                    main.log.error( "You must specify the ingress port" )
-                    # TODO: perhaps more meaningful return
-                    #       Would it make sense to throw an exception and exit
-                    #       the test?
-                    return None
-
-                cmd += " " + \
-                    str( ingressDevice ) + "/" +\
-                    str( portIngress ) + " "
-
-            if "/" in egressDevice:
-                cmd += " " + str( egressDevice )
-            else:
-                if not portEgress:
-                    main.log.error( "You must specify the egress port" )
-                    return None
-
-                cmd += " " +\
-                    str( egressDevice ) + "/" +\
-                    str( portEgress )
-
-            handle = self.sendline( cmd )
-            assert "Command not found:" not in handle, handle
-            # If error, return error message
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding point-to-point intent" )
-                return None
-            else:
-                # TODO: print out all the options in this message?
-                main.log.info( "Point-to-point intent installed between " +
-                               str( ingressDevice ) + " and " +
-                               str( egressDevice ) )
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addMultipointToSinglepointIntent(
-            self,
-            ingressDeviceList,
-            egressDevice,
-            portIngressList=None,
-            portEgress="",
-            ethType="",
-            ethSrc="",
-            ethDst="",
-            bandwidth="",
-            lambdaAlloc=False,
-            ipProto="",
-            ipSrc="",
-            ipDst="",
-            tcpSrc="",
-            tcpDst="",
-            setEthSrc="",
-            setEthDst="" ):
-        """
-        Note:
-            This function assumes the format of all ingress devices
-            is same. That is, all ingress devices include port numbers
-            with a "/" or all ingress devices could specify device
-            ids and port numbers seperately.
-        Required:
-            * ingressDeviceList: List of device ids of ingress device
-                ( Atleast 2 ingress devices required in the list )
-            * egressDevice: device id of egress device
-        Optional:
-            * ethType: specify ethType
-            * ethSrc: specify ethSrc ( i.e. src mac addr )
-            * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link
-            * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent
-            * ipProto: specify ip protocol
-            * ipSrc: specify ip source address
-            * ipDst: specify ip destination address
-            * tcpSrc: specify tcp source port
-            * tcpDst: specify tcp destination port
-            * setEthSrc: action to Rewrite Source MAC Address
-            * setEthDst: action to Rewrite Destination MAC Address
-        Description:
-            Adds a multipoint-to-singlepoint intent ( uni-directional ) by
-            specifying device id's and optional fields
-        Returns:
-            A string of the intent id or None on error
-
-        NOTE: This function may change depending on the
-              options developers provide for multipoint-to-singlepoint
-              intent via cli
-        """
-        try:
-            # If there are no optional arguments
-            if not ethType and not ethSrc and not ethDst\
-                    and not bandwidth and not lambdaAlloc\
-                    and not ipProto and not ipSrc and not ipDst\
-                    and not tcpSrc and not tcpDst and not setEthSrc\
-                    and not setEthDst:
-                cmd = "add-multi-to-single-intent"
-
-            else:
-                cmd = "add-multi-to-single-intent"
-
-                if ethType:
-                    cmd += " --ethType " + str( ethType )
-                if ethSrc:
-                    cmd += " --ethSrc " + str( ethSrc )
-                if ethDst:
-                    cmd += " --ethDst " + str( ethDst )
-                if bandwidth:
-                    cmd += " --bandwidth " + str( bandwidth )
-                if lambdaAlloc:
-                    cmd += " --lambda "
-                if ipProto:
-                    cmd += " --ipProto " + str( ipProto )
-                if ipSrc:
-                    cmd += " --ipSrc " + str( ipSrc )
-                if ipDst:
-                    cmd += " --ipDst " + str( ipDst )
-                if tcpSrc:
-                    cmd += " --tcpSrc " + str( tcpSrc )
-                if tcpDst:
-                    cmd += " --tcpDst " + str( tcpDst )
-                if setEthSrc:
-                    cmd += " --setEthSrc " + str( setEthSrc )
-                if setEthDst:
-                    cmd += " --setEthDst " + str( setEthDst )
-
-            # Check whether the user appended the port
-            # or provided it as an input
-
-            if portIngressList is None:
-                for ingressDevice in ingressDeviceList:
-                    if "/" in ingressDevice:
-                        cmd += " " + str( ingressDevice )
-                    else:
-                        main.log.error( "You must specify " +
-                                        "the ingress port" )
-                        # TODO: perhaps more meaningful return
-                        return main.FALSE
-            else:
-                if len( ingressDeviceList ) == len( portIngressList ):
-                    for ingressDevice, portIngress in zip( ingressDeviceList,
-                                                           portIngressList ):
-                        cmd += " " + \
-                            str( ingressDevice ) + "/" +\
-                            str( portIngress ) + " "
-                else:
-                    main.log.error( "Device list and port list does not " +
-                                    "have the same length" )
-                    return main.FALSE
-            if "/" in egressDevice:
-                cmd += " " + str( egressDevice )
-            else:
-                if not portEgress:
-                    main.log.error( "You must specify " +
-                                    "the egress port" )
-                    return main.FALSE
-
-                cmd += " " +\
-                    str( egressDevice ) + "/" +\
-                    str( portEgress )
-            handle = self.sendline( cmd )
-            assert "Command not found:" not in handle, handle
-            # If error, return error message
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding multipoint-to-singlepoint " +
-                                "intent" )
-                return None
-            else:
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addSinglepointToMultipointIntent(
-            self,
-            ingressDevice,
-            egressDeviceList,
-            portIngress="",
-            portEgressList=None,
-            ethType="",
-            ethSrc="",
-            ethDst="",
-            bandwidth="",
-            lambdaAlloc=False,
-            ipProto="",
-            ipSrc="",
-            ipDst="",
-            tcpSrc="",
-            tcpDst="",
-            setEthSrc="",
-            setEthDst="" ):
-        """
-        Note:
-            This function assumes the format of all egress devices
-            is same. That is, all egress devices include port numbers
-            with a "/" or all egress devices could specify device
-            ids and port numbers seperately.
-        Required:
-            * EgressDeviceList: List of device ids of egress device
-                ( Atleast 2 eress devices required in the list )
-            * ingressDevice: device id of ingress device
-        Optional:
-            * ethType: specify ethType
-            * ethSrc: specify ethSrc ( i.e. src mac addr )
-            * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link
-            * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent
-            * ipProto: specify ip protocol
-            * ipSrc: specify ip source address
-            * ipDst: specify ip destination address
-            * tcpSrc: specify tcp source port
-            * tcpDst: specify tcp destination port
-            * setEthSrc: action to Rewrite Source MAC Address
-            * setEthDst: action to Rewrite Destination MAC Address
-        Description:
-            Adds a singlepoint-to-multipoint intent ( uni-directional ) by
-            specifying device id's and optional fields
-        Returns:
-            A string of the intent id or None on error
-
-        NOTE: This function may change depending on the
-              options developers provide for singlepoint-to-multipoint
-              intent via cli
-        """
-        try:
-            # If there are no optional arguments
-            if not ethType and not ethSrc and not ethDst\
-                    and not bandwidth and not lambdaAlloc\
-                    and not ipProto and not ipSrc and not ipDst\
-                    and not tcpSrc and not tcpDst and not setEthSrc\
-                    and not setEthDst:
-                cmd = "add-single-to-multi-intent"
-
-            else:
-                cmd = "add-single-to-multi-intent"
-
-                if ethType:
-                    cmd += " --ethType " + str( ethType )
-                if ethSrc:
-                    cmd += " --ethSrc " + str( ethSrc )
-                if ethDst:
-                    cmd += " --ethDst " + str( ethDst )
-                if bandwidth:
-                    cmd += " --bandwidth " + str( bandwidth )
-                if lambdaAlloc:
-                    cmd += " --lambda "
-                if ipProto:
-                    cmd += " --ipProto " + str( ipProto )
-                if ipSrc:
-                    cmd += " --ipSrc " + str( ipSrc )
-                if ipDst:
-                    cmd += " --ipDst " + str( ipDst )
-                if tcpSrc:
-                    cmd += " --tcpSrc " + str( tcpSrc )
-                if tcpDst:
-                    cmd += " --tcpDst " + str( tcpDst )
-                if setEthSrc:
-                    cmd += " --setEthSrc " + str( setEthSrc )
-                if setEthDst:
-                    cmd += " --setEthDst " + str( setEthDst )
-
-            # Check whether the user appended the port
-            # or provided it as an input
-
-            if "/" in ingressDevice:
-                cmd += " " + str( ingressDevice )
-            else:
-                if not portIngress:
-                    main.log.error( "You must specify " +
-                                    "the Ingress port" )
-                    return main.FALSE
-
-                cmd += " " +\
-                    str( ingressDevice ) + "/" +\
-                    str( portIngress )
-
-            if portEgressList is None:
-                for egressDevice in egressDeviceList:
-                    if "/" in egressDevice:
-                        cmd += " " + str( egressDevice )
-                    else:
-                        main.log.error( "You must specify " +
-                                        "the egress port" )
-                        # TODO: perhaps more meaningful return
-                        return main.FALSE
-            else:
-                if len( egressDeviceList ) == len( portEgressList ):
-                    for egressDevice, portEgress in zip( egressDeviceList,
-                                                         portEgressList ):
-                        cmd += " " + \
-                            str( egressDevice ) + "/" +\
-                            str( portEgress )
-                else:
-                    main.log.error( "Device list and port list does not " +
-                                    "have the same length" )
-                    return main.FALSE
-            handle = self.sendline( cmd )
-            assert "Command not found:" not in handle, handle
-            # If error, return error message
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding singlepoint-to-multipoint " +
-                                "intent" )
-                return None
-            else:
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def addMplsIntent(
-            self,
-            ingressDevice,
-            egressDevice,
-            ingressPort="",
-            egressPort="",
-            ethType="",
-            ethSrc="",
-            ethDst="",
-            bandwidth="",
-            lambdaAlloc=False,
-            ipProto="",
-            ipSrc="",
-            ipDst="",
-            tcpSrc="",
-            tcpDst="",
-            ingressLabel="",
-            egressLabel="",
-            priority=""):
-        """
-        Required:
-            * ingressDevice: device id of ingress device
-            * egressDevice: device id of egress device
-        Optional:
-            * ethType: specify ethType
-            * ethSrc: specify ethSrc ( i.e. src mac addr )
-            * ethDst: specify ethDst ( i.e. dst mac addr )
-            * bandwidth: specify bandwidth capacity of link
-            * lambdaAlloc: if True, intent will allocate lambda
-              for the specified intent
-            * ipProto: specify ip protocol
-            * ipSrc: specify ip source address
-            * ipDst: specify ip destination address
-            * tcpSrc: specify tcp source port
-            * tcpDst: specify tcp destination port
-            * ingressLabel: Ingress MPLS label
-            * egressLabel: Egress MPLS label
-        Description:
-            Adds MPLS intent by
-            specifying device id's and optional fields
-        Returns:
-            A string of the intent id or None on error
-
-        NOTE: This function may change depending on the
-              options developers provide for MPLS
-              intent via cli
-        """
-        try:
-            # If there are no optional arguments
-            if not ethType and not ethSrc and not ethDst\
-                    and not bandwidth and not lambdaAlloc \
-                    and not ipProto and not ipSrc and not ipDst \
-                    and not tcpSrc and not tcpDst and not ingressLabel \
-                    and not egressLabel:
-                cmd = "add-mpls-intent"
-
-            else:
-                cmd = "add-mpls-intent"
-
-                if ethType:
-                    cmd += " --ethType " + str( ethType )
-                if ethSrc:
-                    cmd += " --ethSrc " + str( ethSrc )
-                if ethDst:
-                    cmd += " --ethDst " + str( ethDst )
-                if bandwidth:
-                    cmd += " --bandwidth " + str( bandwidth )
-                if lambdaAlloc:
-                    cmd += " --lambda "
-                if ipProto:
-                    cmd += " --ipProto " + str( ipProto )
-                if ipSrc:
-                    cmd += " --ipSrc " + str( ipSrc )
-                if ipDst:
-                    cmd += " --ipDst " + str( ipDst )
-                if tcpSrc:
-                    cmd += " --tcpSrc " + str( tcpSrc )
-                if tcpDst:
-                    cmd += " --tcpDst " + str( tcpDst )
-                if ingressLabel:
-                    cmd += " --ingressLabel " + str( ingressLabel )
-                if egressLabel:
-                    cmd += " --egressLabel " + str( egressLabel )
-                if priority:
-                    cmd += " --priority " + str( priority )
-
-            # Check whether the user appended the port
-            # or provided it as an input
-            if "/" in ingressDevice:
-                cmd += " " + str( ingressDevice )
-            else:
-                if not ingressPort:
-                    main.log.error( "You must specify the ingress port" )
-                    return None
-
-                cmd += " " + \
-                    str( ingressDevice ) + "/" +\
-                    str( ingressPort ) + " "
-
-            if "/" in egressDevice:
-                cmd += " " + str( egressDevice )
-            else:
-                if not egressPort:
-                    main.log.error( "You must specify the egress port" )
-                    return None
-
-                cmd += " " +\
-                    str( egressDevice ) + "/" +\
-                    str( egressPort )
-
-            handle = self.sendline( cmd )
-            assert "Command not found:" not in handle, handle
-            # If error, return error message
-            if re.search( "Error", handle ):
-                main.log.error( "Error in adding mpls intent" )
-                return None
-            else:
-                # TODO: print out all the options in this message?
-                main.log.info( "MPLS intent installed between " +
-                               str( ingressDevice ) + " and " +
-                               str( egressDevice ) )
-                match = re.search('id=0x([\da-f]+),', handle)
-                if match:
-                    return match.group()[3:-1]
-                else:
-                    main.log.error( "Error, intent ID not found" )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def removeIntent( self, intentId, app='org.onosproject.cli',
-                      purge=False, sync=False ):
-        """
-        Remove intent for specified application id and intent id
-        Optional args:-
-        -s or --sync: Waits for the removal before returning
-        -p or --purge: Purge the intent from the store after removal
-
-        Returns:
-            main.False on error and
-            cli output otherwise
-        """
-        try:
-            cmdStr = "remove-intent"
-            if purge:
-                cmdStr += " -p"
-            if sync:
-                cmdStr += " -s"
-
-            cmdStr += " " + app + " " + str( intentId )
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in removing intent" )
-                return main.FALSE
-            else:
-                # TODO: Should this be main.TRUE
-                return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def removeAllIntents( self, purge=False, sync=False, app='org.onosproject.cli' ):
-        """
-        Description:
-            Remove all the intents
-        Optional args:-
-            -s or --sync: Waits for the removal before returning
-            -p or --purge: Purge the intent from the store after removal
-        Returns:
-            Returns main.TRUE if all intents are removed, otherwise returns
-            main.FALSE; Returns None for exception
-        """
-        try:
-            cmdStr = "remove-intent"
-            if purge:
-                cmdStr += " -p"
-            if sync:
-                cmdStr += " -s"
-
-            cmdStr += " " + app
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in removing intent" )
-                return main.FALSE
-            else:
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def purgeWithdrawnIntents( self ):
-        """
-        Purges all WITHDRAWN Intents
-        """
-        try:
-            cmdStr = "purge-intents"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in purging intents" )
-                return main.FALSE
-            else:
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def routes( self, jsonFormat=False ):
-        """
-        NOTE: This method should be used after installing application:
-              onos-app-sdnip
-        Optional:
-            * jsonFormat: enable output formatting in json
-        Description:
-            Obtain all routes in the system
-        """
-        try:
-            cmdStr = "routes"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def ipv4RouteNumber( self ):
-        """
-        NOTE: This method should be used after installing application:
-              onos-app-sdnip
-        Description:
-            Obtain the total IPv4 routes number in the system
-        """
-        try:
-            cmdStr = "routes -s -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            jsonResult = json.loads( handle )
-            return jsonResult['totalRoutes4']
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, handle ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def intents( self, jsonFormat = True, summary = False, **intentargs):
-        """
-        Description:
-            Obtain intents from the ONOS cli.
-        Optional:
-            * jsonFormat: Enable output formatting in json, default to True
-            * summary: Whether only output the intent summary, defaults to False
-            * type: Only output a certain type of intent. This options is valid
-                    only when jsonFormat is True and summary is True.
-        """
-        try:
-            cmdStr = "intents"
-            if summary:
-                cmdStr += " -s"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            args = utilities.parse_args( [ "TYPE" ], **intentargs )
-            if "TYPE" in args.keys():
-                intentType = args[ "TYPE" ]
-            else:
-                intentType = ""
-            # IF we want the summary of a specific intent type
-            if jsonFormat and summary and ( intentType != "" ):
-                jsonResult = json.loads( handle )
-                if intentType in jsonResult.keys():
-                    return jsonResult[ intentType ]
-                else:
-                    main.log.error( "unknown TYPE, returning all types of intents" )
-                    return handle
-            else:
-                return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, handle ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getIntentState(self, intentsId, intentsJson=None):
-        """
-            Check intent state.
-            Accepts a single intent ID (string type) or a list of intent IDs.
-            Returns the state(string type) of the id if a single intent ID is
-            accepted.
-            Returns a dictionary with intent IDs as the key and its
-            corresponding states as the values
-            Parameters:
-            intentId: intent ID (string type)
-            intentsJson: parsed json object from the onos:intents api
-            Returns:
-            state = An intent's state- INSTALL,WITHDRAWN etc.
-            stateDict = Dictionary of intent's state. intent ID as the keys and
-            state as the values.
-        """
-        try:
-            state = "State is Undefined"
-            if not intentsJson:
-                rawJson = self.intents()
-            else:
-                rawJson = intentsJson
-            parsedIntentsJson = json.loads( rawJson )
-            if isinstance( intentsId, types.StringType ):
-                for intent in parsedIntentsJson:
-                    if intentsId == intent[ 'id' ]:
-                        state = intent[ 'state' ]
-                        return state
-                main.log.info( "Cannot find intent ID" + str( intentsId ) +
-                               " on the list" )
-                return state
-            elif isinstance( intentsId, types.ListType ):
-                dictList = []
-                for i in xrange( len( intentsId ) ):
-                    stateDict = {}
-                    for intents in parsedIntentsJson:
-                        if intentsId[ i ] == intents[ 'id' ]:
-                            stateDict[ 'state' ] = intents[ 'state' ]
-                            stateDict[ 'id' ] = intentsId[ i ]
-                            dictList.append( stateDict )
-                            break
-                if len( intentsId ) != len( dictList ):
-                    main.log.info( "Cannot find some of the intent ID state" )
-                return dictList
-            else:
-                main.log.info( "Invalid intents ID entry" )
-                return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def checkIntentState( self, intentsId, expectedState='INSTALLED' ):
-        """
-        Description:
-            Check intents state
-        Required:
-            intentsId - List of intents ID to be checked
-        Optional:
-            expectedState - Check the expected state(s) of each intents
-                            state in the list.
-                            *NOTE: You can pass in a list of expected state,
-                            Eg: expectedState = [ 'INSTALLED' , 'INSTALLING' ]
-        Return:
-            Returns main.TRUE only if all intent are the same as expected states
-            , otherwise, returns main.FALSE.
-        """
-        try:
-            # Generating a dictionary: intent id as a key and state as value
-            returnValue = main.TRUE
-            intentsDict = self.getIntentState( intentsId )
-            if len( intentsId ) != len( intentsDict ):
-                main.log.info( self.name + ": There is something wrong " +
-                               "getting intents state" )
-                return main.FALSE
-
-            if isinstance( expectedState, types.StringType ):
-                for intents in intentsDict:
-                    if intents.get( 'state' ) != expectedState:
-                        main.log.debug( self.name + " : Intent ID - " +
-                                        intents.get( 'id' ) +
-                                        " actual state = " +
-                                        intents.get( 'state' )
-                                        + " does not equal expected state = "
-                                        + expectedState )
-                        returnValue = main.FALSE
-
-            elif isinstance( expectedState, types.ListType ):
-                for intents in intentsDict:
-                    if not any( state == intents.get( 'state' ) for state in
-                                expectedState ):
-                        main.log.debug( self.name + " : Intent ID - " +
-                                        intents.get( 'id' ) +
-                                        " actual state = " +
-                                        intents.get( 'state' ) +
-                                        " does not equal expected states = "
-                                        + str( expectedState ) )
-                        returnValue = main.FALSE
-
-            if returnValue == main.TRUE:
-                main.log.info( self.name + ": All " +
-                               str( len( intentsDict ) ) +
-                               " intents are in " + str( expectedState ) +
-                               " state" )
-            return returnValue
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def checkIntentSummary( self, timeout=60 ):
-        """
-        Description:
-            Check the number of installed intents.
-        Optional:
-            timeout - the timeout for pexcept
-        Return:
-            Returns main.TRUE only if the number of all installed intents are the same as total intents number
-            , otherwise, returns main.FALSE.
-        """
-
-        try:
-            cmd = "intents -s -j"
-
-            # Check response if something wrong
-            response = self.sendline( cmd, timeout=timeout )
-            if response == None:
-                return main.False
-            response = json.loads( response )
-
-            # get total and installed number, see if they are match
-            allState = response.get( 'all' )
-            if allState.get('total') == allState.get('installed'):
-                main.log.info( 'Total Intents: {}   Installed Intents: {}'.format( allState.get('total'), allState.get('installed') ) )
-                return main.TRUE
-            main.log.info( 'Verified Intents failed Excepte intetnes: {} installed intents: {}'.format( allState.get('total'), allState.get('installed') ) )
-            return main.FALSE
-
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, response ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def command( self, cmd=None, jsonFormat=True, timeout=5 ):
-        """
-        To issue user  specified command
-        """
-        try:
-            if cmd is None:
-                return
-            cmdStr = cmd
-            if jsonFormat:
-                cmdStr += " -j "
-            handle = self.sendline( cmdStr, timeout=timeout )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error:", handle ):
-                main.log.error( str( handle ) )
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def masters( self, jsonFormat=True ):
-        """
-        Lists all devices and their corresponding master controller ip
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "masters"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def flows( self, state="", jsonFormat=True, timeout=60 ):
-        """
-        Optional:
-            * jsonFormat: enable output formatting in json
-        Description:
-            Obtain flows currently installed
-        """
-        try:
-            cmdStr = "flows"
-            if jsonFormat:
-                cmdStr += " -j "
-            cmdStr += state
-            handle = self.sendline( cmdStr, timeout=timeout )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error:", handle ):
-                main.log.error( self.name + ": flows() response: " +
-                                str( handle ) )
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-
-    def checkFlowsState( self, isPENDING=True, timeout=60 ):
-        """
-        Description:
-            Check the if all the current flows are in ADDED state
-            We check PENDING_ADD, PENDING_REMOVE, REMOVED, and FAILED flows,
-            if the count of those states is 0, which means all current flows
-            are in ADDED state, and return main.TRUE otherwise return main.FALSE
-        Optional:
-            * isPENDING:  whether the PENDING_ADD is also a correct status
-        Return:
-            returnValue - Returns main.TRUE only if all flows are in
-                          ADDED state or PENDING_ADD if the isPENDING
-                          parameter is set true, return main.FALSE otherwise.
-        """
-        try:
-            states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
-            checkedStates = []
-            statesCount = [0, 0, 0, 0]
-            for s in states:
-                rawFlows = self.flows( state=s, timeout = timeout )
-                checkedStates.append( json.loads( rawFlows ) )
-            for i in range( len( states ) ):
-                for c in checkedStates[i]:
-                    try:
-                        statesCount[i] += int( c.get( "flowCount" ) )
-                    except TypeError:
-                        main.log.exception( "Json object not as expected" )
-                main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
-
-            # We want to count PENDING_ADD if isPENDING is true
-            if isPENDING:
-                if statesCount[1] + statesCount[2] + statesCount[3] > 0:
-                    return main.FALSE
-            else:
-                if statesCount[0] + statesCount[1] + statesCount[2] + statesCount[3] > 0:
-                    return main.FALSE
-            return main.TRUE
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawFlows ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def pushTestIntents( self, ingress, egress, batchSize, offset="",
-                         options="", timeout=10, background = False ):
-        """
-        Description:
-            Push a number of intents in a batch format to
-            a specific point-to-point intent definition
-        Required:
-            * ingress: specify source dpid
-            * egress: specify destination dpid
-            * batchSize: specify number of intents to push
-        Optional:
-            * offset: the keyOffset is where the next batch of intents
-                      will be installed
-        Returns: If failed to push test intents, it will returen None,
-                 if successful, return true.
-                 Timeout expection will return None,
-                 TypeError will return false
-                 other expections will exit()
-        """
-        try:
-            if background:
-                back = "&"
-            else:
-                back = ""
-            cmd = "push-test-intents {} {} {} {} {} {}".format( options,
-                                                                ingress,
-                                                                egress,
-                                                                batchSize,
-                                                                offset,
-                                                                back )
-            response = self.sendline( cmd, timeout=timeout )
-            assert "Command not found:" not in response, response
-            main.log.info( response )
-            if response == None:
-                return None
-
-            # TODO: We should handle if there is failure in installation
-            return main.TRUE
-
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except pexpect.TIMEOUT:
-            main.log.error( self.name + ": ONOS timeout" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getTotalFlowsNum( self, timeout=60 ):
-        """
-        Description:
-            Get the number of ADDED flows.
-        Return:
-            The number of ADDED flows
-        """
-
-        try:
-            # get total added flows number
-            cmd = "flows -s|grep ADDED|wc -l"
-            totalFlows = self.sendline( cmd, timeout=timeout )
-
-            if totalFlows == None:
-                # if timeout, we will get total number of all flows, and subtract other states
-                states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
-                checkedStates = []
-                totalFlows = 0
-                statesCount = [0, 0, 0, 0]
-
-                # get total flows from summary
-                response = json.loads( self.sendline( "summary -j", timeout=timeout ) )
-                totalFlows = int( response.get("flows") )
-
-                for s in states:
-                    rawFlows = self.flows( state=s, timeout = timeout )
-                    if rawFlows == None:
-                        # if timeout, return the total flows number from summary command
-                        return totalFlows
-                    checkedStates.append( json.loads( rawFlows ) )
-
-                # Calculate ADDED flows number, equal total subtracts others
-                for i in range( len( states ) ):
-                    for c in checkedStates[i]:
-                        try:
-                            statesCount[i] += int( c.get( "flowCount" ) )
-                        except TypeError:
-                            main.log.exception( "Json object not as expected" )
-                    totalFlows = totalFlows - int( statesCount[i] )
-                    main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
-
-                return totalFlows
-
-            return totalFlows
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getTotalIntentsNum( self ):
-        """
-        Description:
-            Get the total number of intents, include every states.
-        Return:
-            The number of intents
-        """
-        try:
-            cmd = "summary -j"
-            response = self.sendline( cmd )
-            if response == None:
-                return  -1
-            response = json.loads( response )
-            return int( response.get("intents") )
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def intentsEventsMetrics( self, jsonFormat=True ):
-        """
-        Description:Returns topology metrics
-        Optional:
-            * jsonFormat: enable json formatting of output
-        """
-        try:
-            cmdStr = "intents-events-metrics"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def topologyEventsMetrics( self, jsonFormat=True ):
-        """
-        Description:Returns topology metrics
-        Optional:
-            * jsonFormat: enable json formatting of output
-        """
-        try:
-            cmdStr = "topology-events-metrics"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if handle:
-                return handle
-            elif jsonFormat:
-                # Return empty json
-                return '{}'
-            else:
-                return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    # Wrapper functions ****************
-    # Wrapper functions use existing driver
-    # functions and extends their use case.
-    # For example, we may use the output of
-    # a normal driver function, and parse it
-    # using a wrapper function
-
-    def getAllIntentsId( self ):
-        """
-        Description:
-            Obtain all intent id's in a list
-        """
-        try:
-            # Obtain output of intents function
-            intentsStr = self.intents(jsonFormat=False)
-            intentIdList = []
-
-            # Parse the intents output for ID's
-            intentsList = [ s.strip() for s in intentsStr.splitlines() ]
-            for intents in intentsList:
-                match = re.search('id=0x([\da-f]+),', intents)
-                if match:
-                    tmpId = match.group()[3:-1]
-                    intentIdList.append( tmpId )
-            return intentIdList
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def FlowAddedCount( self, deviceId ):
-        """
-        Determine the number of flow rules for the given device id that are
-        in the added state
-        """
-        try:
-            cmdStr = "flows any " + str( deviceId ) + " | " +\
-                     "grep 'state=ADDED' | wc -l"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getAllDevicesId( self ):
-        """
-        Use 'devices' function to obtain list of all devices
-        and parse the result to obtain a list of all device
-        id's. Returns this list. Returns empty list if no
-        devices exist
-        List is ordered sequentially
-
-        This function may be useful if you are not sure of the
-        device id, and wish to execute other commands using
-        the ids. By obtaining the list of device ids on the fly,
-        you can iterate through the list to get mastership, etc.
-        """
-        try:
-            # Call devices and store result string
-            devicesStr = self.devices( jsonFormat=False )
-            idList = []
-
-            if not devicesStr:
-                main.log.info( "There are no devices to get id from" )
-                return idList
-
-            # Split the string into list by comma
-            deviceList = devicesStr.split( "," )
-            # Get temporary list of all arguments with string 'id='
-            tempList = [ dev for dev in deviceList if "id=" in dev ]
-            # Split list further into arguments before and after string
-            # 'id='. Get the latter portion ( the actual device id ) and
-            # append to idList
-            for arg in tempList:
-                idList.append( arg.split( "id=" )[ 1 ] )
-            return idList
-
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getAllNodesId( self ):
-        """
-        Uses 'nodes' function to obtain list of all nodes
-        and parse the result of nodes to obtain just the
-        node id's.
-        Returns:
-            list of node id's
-        """
-        try:
-            nodesStr = self.nodes( jsonFormat=True )
-            idList = []
-            # Sample nodesStr output
-            # id=local, address=127.0.0.1:9876, state=READY *
-            if not nodesStr:
-                main.log.info( "There are no nodes to get id from" )
-                return idList
-            nodesJson = json.loads( nodesStr )
-            idList = [ node.get('id') for node in nodesJson ]
-            return idList
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, nodesStr ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getDevice( self, dpid=None ):
-        """
-        Return the first device from the devices api whose 'id' contains 'dpid'
-        Return None if there is no match
-        """
-        try:
-            if dpid is None:
-                return None
-            else:
-                dpid = dpid.replace( ':', '' )
-                rawDevices = self.devices()
-                devicesJson = json.loads( rawDevices )
-                # search json for the device with dpid then return the device
-                for device in devicesJson:
-                    # print "%s in  %s?" % ( dpid, device[ 'id' ] )
-                    if dpid in device[ 'id' ]:
-                        return device
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawDevices ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def checkStatus( self, ip, numoswitch, numolink, logLevel="info" ):
-        """
-        Checks the number of switches & links that ONOS sees against the
-        supplied values. By default this will report to main.log, but the
-        log level can be specified.
-
-        Params: ip = ip used for the onos cli
-                numoswitch = expected number of switches
-                numolink = expected number of links
-                logLevel = level to log to. Currently accepts
-                'info', 'warn' and 'report'
-
-
-        logLevel can
-
-        Returns: main.TRUE if the number of switches and links are correct,
-                 main.FALSE if the number of switches and links is incorrect,
-                 and main.ERROR otherwise
-        """
-        try:
-            topology = self.getTopology( ip )
-            if topology == {}:
-                return main.ERROR
-            output = ""
-            # Is the number of switches is what we expected
-            devices = topology.get( 'devices', False )
-            links = topology.get( 'links', False )
-            if devices is False or links is False:
-                return main.ERROR
-            switchCheck = ( int( devices ) == int( numoswitch ) )
-            # Is the number of links is what we expected
-            linkCheck = ( int( links ) == int( numolink ) )
-            if ( switchCheck and linkCheck ):
-                # We expected the correct numbers
-                output += "The number of links and switches match " +\
-                          "what was expected"
-                result = main.TRUE
-            else:
-                output += "The number of links and switches does not match " +\
-                          "what was expected"
-                result = main.FALSE
-            output = output + "\n ONOS sees %i devices (%i expected) \
-                    and %i links (%i expected)" % (
-                int( devices ), int( numoswitch ), int( links ),
-                int( numolink ) )
-            if logLevel == "report":
-                main.log.report( output )
-            elif logLevel == "warn":
-                main.log.warn( output )
-            else:
-                main.log.info( self.name + ": " + output )
-            return result
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def deviceRole( self, deviceId, onosNode, role="master" ):
-        """
-        Calls the device-role cli command.
-        deviceId must be the id of a device as seen in the onos devices command
-        onosNode is the ip of one of the onos nodes in the cluster
-        role must be either master, standby, or none
-
-        Returns:
-            main.TRUE or main.FALSE based on argument verification and
-            main.ERROR if command returns and error
-        """
-        try:
-            if role.lower() == "master" or role.lower() == "standby" or\
-                    role.lower() == "none":
-                cmdStr = "device-role " +\
-                    str( deviceId ) + " " +\
-                    str( onosNode ) + " " +\
-                    str( role )
-                handle = self.sendline( cmdStr )
-                assert "Command not found:" not in handle, handle
-                if re.search( "Error", handle ):
-                    # end color output to escape any colours
-                    # from the cli
-                    main.log.error( self.name + ": " +
-                                    handle + '\033[0m' )
-                    return main.ERROR
-                return main.TRUE
-            else:
-                main.log.error( "Invalid 'role' given to device_role(). " +
-                                "Value was '" + str(role) + "'." )
-                return main.FALSE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def clusters( self, jsonFormat=True ):
-        """
-        Lists all clusters
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "clusters"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def electionTestLeader( self ):
-        """
-        CLI command to get the current leader for the Election test application
-        NOTE: Requires installation of the onos-app-election feature
-        Returns: Node IP of the leader if one exists
-                 None if none exists
-                 Main.FALSE on error
-        """
-        try:
-            cmdStr = "election-test-leader"
-            response = self.sendline( cmdStr )
-            assert "Command not found:" not in response, response
-            # Leader
-            leaderPattern = "The\scurrent\sleader\sfor\sthe\sElection\s" +\
-                "app\sis\s(?P<node>.+)\."
-            nodeSearch = re.search( leaderPattern, response )
-            if nodeSearch:
-                node = nodeSearch.group( 'node' )
-                main.log.info( "Election-test-leader on " + str( self.name ) +
-                               " found " + node + " as the leader" )
-                return node
-            # no leader
-            nullPattern = "There\sis\scurrently\sno\sleader\selected\sfor\s" +\
-                "the\sElection\sapp"
-            nullSearch = re.search( nullPattern, response )
-            if nullSearch:
-                main.log.info( "Election-test-leader found no leader on " +
-                               self.name )
-                return None
-            # error
-            errorPattern = "Command\snot\sfound"
-            if re.search( errorPattern, response ):
-                main.log.error( "Election app is not loaded on " + self.name )
-                # TODO: Should this be main.ERROR?
-                return main.FALSE
-            else:
-                main.log.error( "Error in electionTestLeader on " + self.name +
-                                ": " + "unexpected response" )
-                main.log.error( repr( response ) )
-                return main.FALSE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def electionTestRun( self ):
-        """
-        CLI command to run for leadership of the Election test application.
-        NOTE: Requires installation of the onos-app-election feature
-        Returns: Main.TRUE on success
-                 Main.FALSE on error
-        """
-        try:
-            cmdStr = "election-test-run"
-            response = self.sendline( cmdStr )
-            assert "Command not found:" not in response, response
-            # success
-            successPattern = "Entering\sleadership\selections\sfor\sthe\s" +\
-                "Election\sapp."
-            search = re.search( successPattern, response )
-            if search:
-                main.log.info( self.name + " entering leadership elections " +
-                               "for the Election app." )
-                return main.TRUE
-            # error
-            errorPattern = "Command\snot\sfound"
-            if re.search( errorPattern, response ):
-                main.log.error( "Election app is not loaded on " + self.name )
-                return main.FALSE
-            else:
-                main.log.error( "Error in electionTestRun on " + self.name +
-                                ": " + "unexpected response" )
-                main.log.error( repr( response ) )
-                return main.FALSE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def electionTestWithdraw( self ):
-        """
-         * CLI command to withdraw the local node from leadership election for
-         * the Election test application.
-         #NOTE: Requires installation of the onos-app-election feature
-         Returns: Main.TRUE on success
-                  Main.FALSE on error
-        """
-        try:
-            cmdStr = "election-test-withdraw"
-            response = self.sendline( cmdStr )
-            assert "Command not found:" not in response, response
-            # success
-            successPattern = "Withdrawing\sfrom\sleadership\selections\sfor" +\
-                "\sthe\sElection\sapp."
-            if re.search( successPattern, response ):
-                main.log.info( self.name + " withdrawing from leadership " +
-                               "elections for the Election app." )
-                return main.TRUE
-            # error
-            errorPattern = "Command\snot\sfound"
-            if re.search( errorPattern, response ):
-                main.log.error( "Election app is not loaded on " + self.name )
-                return main.FALSE
-            else:
-                main.log.error( "Error in electionTestWithdraw on " +
-                                self.name + ": " + "unexpected response" )
-                main.log.error( repr( response ) )
-                return main.FALSE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getDevicePortsEnabledCount( self, dpid ):
-        """
-        Get the count of all enabled ports on a particular device/switch
-        """
-        try:
-            dpid = str( dpid )
-            cmdStr = "onos:ports -e " + dpid + " | wc -l"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            if re.search( "No such device", output ):
-                main.log.error( "Error in getting ports" )
-                return ( output, "Error" )
-            else:
-                return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return ( output, "Error" )
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getDeviceLinksActiveCount( self, dpid ):
-        """
-        Get the count of all enabled ports on a particular device/switch
-        """
-        try:
-            dpid = str( dpid )
-            cmdStr = "onos:links " + dpid + " | grep ACTIVE | wc -l"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            if re.search( "No such device", output ):
-                main.log.error( "Error in getting ports " )
-                return ( output, "Error " )
-            else:
-                return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return ( output, "Error " )
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getAllIntentIds( self ):
-        """
-        Return a list of all Intent IDs
-        """
-        try:
-            cmdStr = "onos:intents | grep id="
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            if re.search( "Error", output ):
-                main.log.error( "Error in getting ports" )
-                return ( output, "Error" )
-            else:
-                return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return ( output, "Error" )
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def intentSummary( self ):
-        """
-        Returns a dictionary containing the current intent states and the count
-        """
-        try:
-            intents = self.intents( )
-            states = []
-            for intent in json.loads( intents ):
-                states.append( intent.get( 'state', None ) )
-            out = [ ( i, states.count( i ) ) for i in set( states ) ]
-            main.log.info( dict( out ) )
-            return dict( out )
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, intents ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def leaders( self, jsonFormat=True ):
-        """
-        Returns the output of the leaders command.
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "onos:leaders"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def leaderCandidates( self, jsonFormat=True ):
-        """
-        Returns the output of the leaders -c command.
-        Optional argument:
-            * jsonFormat - boolean indicating if you want output in json
-        """
-        try:
-            cmdStr = "onos:leaders -c"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def specificLeaderCandidate( self, topic ):
-        """
-        Returns a list in format [leader,candidate1,candidate2,...] for a given
-        topic parameter and an empty list if the topic doesn't exist
-        If no leader is elected leader in the returned list will be "none"
-        Returns None if there is a type error processing the json object
-        """
-        try:
-            cmdStr = "onos:leaders -j"
-            rawOutput = self.sendline( cmdStr )
-            assert "Command not found:" not in rawOutput, rawOutput
-            output = json.loads( rawOutput )
-            results = []
-            for dict in output:
-                if dict["topic"] == topic:
-                    leader = dict["leader"]
-                    candidates = re.split( ", ", dict["candidates"][1:-1] )
-                    results.append( leader )
-                    results.extend( candidates )
-            return results
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawOutput ) )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def pendingMap( self, jsonFormat=True ):
-        """
-        Returns the output of the intent Pending map.
-        """
-        try:
-            cmdStr = "onos:intents -p"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def partitions( self, jsonFormat=True ):
-        """
-        Returns the output of the raft partitions command for ONOS.
-        """
-        # Sample JSON
-        # {
-        #     "leader": "tcp://10.128.30.11:7238",
-        #     "members": [
-        #         "tcp://10.128.30.11:7238",
-        #         "tcp://10.128.30.17:7238",
-        #         "tcp://10.128.30.13:7238",
-        #     ],
-        #     "name": "p1",
-        #     "term": 3
-        # },
-        try:
-            cmdStr = "onos:partitions"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def apps( self, jsonFormat=True ):
-        """
-        Returns the output of the apps command for ONOS. This command lists
-        information about installed ONOS applications
-        """
-        # Sample JSON object
-        # [{"name":"org.onosproject.openflow","id":0,"version":"1.2.0",
-        # "description":"ONOS OpenFlow protocol southbound providers",
-        # "origin":"ON.Lab","permissions":"[]","featuresRepo":"",
-        # "features":"[onos-openflow]","state":"ACTIVE"}]
-        try:
-            cmdStr = "onos:apps"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            return output
-        # FIXME: look at specific exceptions/Errors
-        except AssertionError:
-            main.log.exception( "Error in processing onos:app command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def appStatus( self, appName ):
-        """
-        Uses the onos:apps cli command to return the status of an application.
-        Returns:
-            "ACTIVE" - If app is installed and activated
-            "INSTALLED" - If app is installed and deactivated
-            "UNINSTALLED" - If app is not installed
-            None - on error
-        """
-        try:
-            if not isinstance( appName, types.StringType ):
-                main.log.error( self.name + ".appStatus(): appName must be" +
-                                " a string" )
-                return None
-            output = self.apps( jsonFormat=True )
-            appsJson = json.loads( output )
-            state = None
-            for app in appsJson:
-                if appName == app.get('name'):
-                    state = app.get('state')
-                    break
-            if state == "ACTIVE" or state == "INSTALLED":
-                return state
-            elif state is None:
-                return "UNINSTALLED"
-            elif state:
-                main.log.error( "Unexpected state from 'onos:apps': " +
-                                str( state ) )
-                return state
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, output ) )
-            main.stop()
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def app( self, appName, option ):
-        """
-        Interacts with the app command for ONOS. This command manages
-        application inventory.
-        """
-        try:
-            # Validate argument types
-            valid = True
-            if not isinstance( appName, types.StringType ):
-                main.log.error( self.name + ".app(): appName must be a " +
-                                "string" )
-                valid = False
-            if not isinstance( option, types.StringType ):
-                main.log.error( self.name + ".app(): option must be a string" )
-                valid = False
-            if not valid:
-                return main.FALSE
-            # Validate Option
-            option = option.lower()
-            # NOTE: Install may become a valid option
-            if option == "activate":
-                pass
-            elif option == "deactivate":
-                pass
-            elif option == "uninstall":
-                pass
-            else:
-                # Invalid option
-                main.log.error( "The ONOS app command argument only takes " +
-                                "the values: (activate|deactivate|uninstall)" +
-                                "; was given '" + option + "'")
-                return main.FALSE
-            cmdStr = "onos:app " + option + " " + appName
-            output = self.sendline( cmdStr )
-            if "Error executing command" in output:
-                main.log.error( "Error in processing onos:app command: " +
-                                str( output ) )
-                return main.FALSE
-            elif "No such application" in output:
-                main.log.error( "The application '" + appName +
-                                "' is not installed in ONOS" )
-                return main.FALSE
-            elif "Command not found:" in output:
-                main.log.error( "Error in processing onos:app command: " +
-                                str( output ) )
-                return main.FALSE
-            elif "Unsupported command:" in output:
-                main.log.error( "Incorrect command given to 'app': " +
-                                str( output ) )
-            # NOTE: we may need to add more checks here
-            # else: Command was successful
-            # main.log.debug( "app response: " + repr( output ) )
-            return main.TRUE
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def activateApp( self, appName, check=True ):
-        """
-        Activate an app that is already installed in ONOS
-        appName is the hierarchical app name, not the feature name
-        If check is True, method will check the status of the app after the
-        command is issued
-        Returns main.TRUE if the command was successfully sent
-                main.FALSE if the cli responded with an error or given
-                    incorrect input
-        """
-        try:
-            if not isinstance( appName, types.StringType ):
-                main.log.error( self.name + ".activateApp(): appName must be" +
-                                " a string" )
-                return main.FALSE
-            status = self.appStatus( appName )
-            if status == "INSTALLED":
-                response = self.app( appName, "activate" )
-                if check and response == main.TRUE:
-                    for i in range(10):  # try 10 times then give up
-                        status = self.appStatus( appName )
-                        if status == "ACTIVE":
-                            return main.TRUE
-                        else:
-                            main.log.debug( "The state of application " +
-                                            appName + " is " + status )
-                            time.sleep( 1 )
-                    return main.FALSE
-                else:  # not 'check' or command didn't succeed
-                    return response
-            elif status == "ACTIVE":
-                return main.TRUE
-            elif status == "UNINSTALLED":
-                main.log.error( self.name + ": Tried to activate the " +
-                                "application '" + appName + "' which is not " +
-                                "installed." )
-            else:
-                main.log.error( "Unexpected return value from appStatus: " +
-                                str( status ) )
-                return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def deactivateApp( self, appName, check=True ):
-        """
-        Deactivate an app that is already activated in ONOS
-        appName is the hierarchical app name, not the feature name
-        If check is True, method will check the status of the app after the
-        command is issued
-        Returns main.TRUE if the command was successfully sent
-                main.FALSE if the cli responded with an error or given
-                    incorrect input
-        """
-        try:
-            if not isinstance( appName, types.StringType ):
-                main.log.error( self.name + ".deactivateApp(): appName must " +
-                                "be a string" )
-                return main.FALSE
-            status = self.appStatus( appName )
-            if status == "INSTALLED":
-                return main.TRUE
-            elif status == "ACTIVE":
-                response = self.app( appName, "deactivate" )
-                if check and response == main.TRUE:
-                    for i in range(10):  # try 10 times then give up
-                        status = self.appStatus( appName )
-                        if status == "INSTALLED":
-                            return main.TRUE
-                        else:
-                            time.sleep( 1 )
-                    return main.FALSE
-                else:  # not check or command didn't succeed
-                    return response
-            elif status == "UNINSTALLED":
-                main.log.warn( self.name + ": Tried to deactivate the " +
-                                "application '" + appName + "' which is not " +
-                                "installed." )
-                return main.TRUE
-            else:
-                main.log.error( "Unexpected return value from appStatus: " +
-                                str( status ) )
-                return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def uninstallApp( self, appName, check=True ):
-        """
-        Uninstall an app that is already installed in ONOS
-        appName is the hierarchical app name, not the feature name
-        If check is True, method will check the status of the app after the
-        command is issued
-        Returns main.TRUE if the command was successfully sent
-                main.FALSE if the cli responded with an error or given
-                    incorrect input
-        """
-        # TODO: check with Thomas about the state machine for apps
-        try:
-            if not isinstance( appName, types.StringType ):
-                main.log.error( self.name + ".uninstallApp(): appName must " +
-                                "be a string" )
-                return main.FALSE
-            status = self.appStatus( appName )
-            if status == "INSTALLED":
-                response = self.app( appName, "uninstall" )
-                if check and response == main.TRUE:
-                    for i in range(10):  # try 10 times then give up
-                        status = self.appStatus( appName )
-                        if status == "UNINSTALLED":
-                            return main.TRUE
-                        else:
-                            time.sleep( 1 )
-                    return main.FALSE
-                else:  # not check or command didn't succeed
-                    return response
-            elif status == "ACTIVE":
-                main.log.warn( self.name + ": Tried to uninstall the " +
-                                "application '" + appName + "' which is " +
-                                "currently active." )
-                response = self.app( appName, "uninstall" )
-                if check and response == main.TRUE:
-                    for i in range(10):  # try 10 times then give up
-                        status = self.appStatus( appName )
-                        if status == "UNINSTALLED":
-                            return main.TRUE
-                        else:
-                            time.sleep( 1 )
-                    return main.FALSE
-                else:  # not check or command didn't succeed
-                    return response
-            elif status == "UNINSTALLED":
-                return main.TRUE
-            else:
-                main.log.error( "Unexpected return value from appStatus: " +
-                                str( status ) )
-                return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def appIDs( self, jsonFormat=True ):
-        """
-        Show the mappings between app id and app names given by the 'app-ids'
-        cli command
-        """
-        try:
-            cmdStr = "app-ids"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "Error in processing onos:app-ids command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def appToIDCheck( self ):
-        """
-        This method will check that each application's ID listed in 'apps' is
-        the same as the ID listed in 'app-ids'. The check will also check that
-        there are no duplicate IDs issued. Note that an app ID should be
-        a globaly unique numerical identifier for app/app-like features. Once
-        an ID is registered, the ID is never freed up so that if an app is
-        reinstalled it will have the same ID.
-
-        Returns: main.TRUE  if the check passes and
-                 main.FALSE if the check fails or
-                 main.ERROR if there is some error in processing the test
-        """
-        try:
-            bail = False
-            rawJson = self.appIDs( jsonFormat=True )
-            if rawJson:
-                ids = json.loads( rawJson )
-            else:
-                main.log.error( "app-ids returned nothing:" + repr( rawJson ) )
-                bail = True
-            rawJson = self.apps( jsonFormat=True )
-            if rawJson:
-                apps = json.loads( rawJson )
-            else:
-                main.log.error( "apps returned nothing:" + repr( rawJson ) )
-                bail = True
-            if bail:
-                return main.FALSE
-            result = main.TRUE
-            for app in apps:
-                appID = app.get( 'id' )
-                if appID is None:
-                    main.log.error( "Error parsing app: " + str( app ) )
-                    result = main.FALSE
-                appName = app.get( 'name' )
-                if appName is None:
-                    main.log.error( "Error parsing app: " + str( app ) )
-                    result = main.FALSE
-                # get the entry in ids that has the same appID
-                current = filter( lambda item: item[ 'id' ] == appID, ids )
-                # main.log.debug( "Comparing " + str( app ) + " to " +
-                #                 str( current ) )
-                if not current:  # if ids doesn't have this id
-                    result = main.FALSE
-                    main.log.error( "'app-ids' does not have the ID for " +
-                                    str( appName ) + " that apps does." )
-                elif len( current ) > 1:
-                    # there is more than one app with this ID
-                    result = main.FALSE
-                    # We will log this later in the method
-                elif not current[0][ 'name' ] == appName:
-                    currentName = current[0][ 'name' ]
-                    result = main.FALSE
-                    main.log.error( "'app-ids' has " + str( currentName ) +
-                                    " registered under id:" + str( appID ) +
-                                    " but 'apps' has " + str( appName ) )
-                else:
-                    pass  # id and name match!
-            # now make sure that app-ids has no duplicates
-            idsList = []
-            namesList = []
-            for item in ids:
-                idsList.append( item[ 'id' ] )
-                namesList.append( item[ 'name' ] )
-            if len( idsList ) != len( set( idsList ) ) or\
-               len( namesList ) != len( set( namesList ) ):
-                    main.log.error( "'app-ids' has some duplicate entries: \n"
-                                    + json.dumps( ids,
-                                                  sort_keys=True,
-                                                  indent=4,
-                                                  separators=( ',', ': ' ) ) )
-                    result = main.FALSE
-            return result
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getCfg( self, component=None, propName=None, short=False,
-                jsonFormat=True ):
-        """
-        Get configuration settings from onos cli
-        Optional arguments:
-            component - Optionally only list configurations for a specific
-                        component. If None, all components with configurations
-                        are displayed. Case Sensitive string.
-            propName - If component is specified, propName option will show
-                       only this specific configuration from that component.
-                       Case Sensitive string.
-            jsonFormat - Returns output as json. Note that this will override
-                         the short option
-            short - Short, less verbose, version of configurations.
-                    This is overridden by the json option
-        returns:
-            Output from cli as a string or None on error
-        """
-        try:
-            baseStr = "cfg"
-            cmdStr = " get"
-            componentStr = ""
-            if component:
-                componentStr += " " + component
-                if propName:
-                    componentStr += " " + propName
-            if jsonFormat:
-                baseStr += " -j"
-            elif short:
-                baseStr += " -s"
-            output = self.sendline( baseStr + cmdStr + componentStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            return output
-        except AssertionError:
-            main.log.exception( "Error in processing 'cfg get' command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setCfg( self, component, propName, value=None, check=True ):
-        """
-        Set/Unset configuration settings from ONOS cli
-        Required arguments:
-            component - The case sensitive name of the component whose
-                        property is to be set
-            propName - The case sensitive name of the property to be set/unset
-        Optional arguments:
-            value - The value to set the property to. If None, will unset the
-                    property and revert it to it's default value(if applicable)
-            check - Boolean, Check whether the option was successfully set this
-                    only applies when a value is given.
-        returns:
-            main.TRUE on success or main.FALSE on failure. If check is False,
-            will return main.TRUE unless there is an error
-        """
-        try:
-            baseStr = "cfg"
-            cmdStr = " set " + str( component ) + " " + str( propName )
-            if value is not None:
-                cmdStr += " " + str( value )
-            output = self.sendline( baseStr + cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            if value and check:
-                results = self.getCfg( component=str( component ),
-                                       propName=str( propName ),
-                                       jsonFormat=True )
-                # Check if current value is what we just set
-                try:
-                    jsonOutput = json.loads( results )
-                    current = jsonOutput[ 'value' ]
-                except ( TypeError, ValueError ):
-                    main.log.exception( "Error parsing cfg output" )
-                    main.log.error( "output:" + repr( results ) )
-                    return main.FALSE
-                if current == str( value ):
-                    return main.TRUE
-                return main.FALSE
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "Error in processing 'cfg set' command." )
-            return main.FALSE
-        except ( TypeError, ValueError ):
-            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, results ) )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setTestAdd( self, setName, values ):
-        """
-        CLI command to add elements to a distributed set.
-        Arguments:
-            setName - The name of the set to add to.
-            values - The value(s) to add to the set, space seperated.
-        Example usages:
-            setTestAdd( "set1", "a b c" )
-            setTestAdd( "set2", "1" )
-        returns:
-            main.TRUE on success OR
-            main.FALSE if elements were already in the set OR
-            main.ERROR on error
-        """
-        try:
-            cmdStr = "set-test-add " + str( setName ) + " " + str( values )
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Error executing command" not in output
-            positiveMatch = "\[(.*)\] was added to the set " + str( setName )
-            negativeMatch = "\[(.*)\] was already in set " + str( setName )
-            main.log.info( self.name + ": " + output )
-            if re.search( positiveMatch, output):
-                return main.TRUE
-            elif re.search( negativeMatch, output):
-                return main.FALSE
-            else:
-                main.log.error( self.name + ": setTestAdd did not" +
-                                " match expected output" )
-                main.log.debug( self.name + " actual: " + repr( output ) )
-                return main.ERROR
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' command. " )
-            return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setTestRemove( self, setName, values, clear=False, retain=False ):
-        """
-        CLI command to remove elements from a distributed set.
-        Required arguments:
-            setName - The name of the set to remove from.
-            values - The value(s) to remove from the set, space seperated.
-        Optional arguments:
-            clear - Clear all elements from the set
-            retain - Retain only the  given values. (intersection of the
-                     original set and the given set)
-        returns:
-            main.TRUE on success OR
-            main.FALSE if the set was not changed OR
-            main.ERROR on error
-        """
-        try:
-            cmdStr = "set-test-remove "
-            if clear:
-                cmdStr += "-c " + str( setName )
-            elif retain:
-                cmdStr += "-r " + str( setName ) + " " + str( values )
-            else:
-                cmdStr += str( setName ) + " " + str( values )
-            output = self.sendline( cmdStr )
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-            if clear:
-                pattern = "Set " + str( setName ) + " cleared"
-                if re.search( pattern, output ):
-                    return main.TRUE
-            elif retain:
-                positivePattern = str( setName ) + " was pruned to contain " +\
-                                  "only elements of set \[(.*)\]"
-                negativePattern = str( setName ) + " was not changed by " +\
-                                  "retaining only elements of the set " +\
-                                  "\[(.*)\]"
-                if re.search( positivePattern, output ):
-                    return main.TRUE
-                elif re.search( negativePattern, output ):
-                    return main.FALSE
-            else:
-                positivePattern = "\[(.*)\] was removed from the set " +\
-                                  str( setName )
-                if ( len( values.split() ) == 1 ):
-                    negativePattern = "\[(.*)\] was not in set " +\
-                                      str( setName )
-                else:
-                    negativePattern = "No element of \[(.*)\] was in set " +\
-                                      str( setName )
-                if re.search( positivePattern, output ):
-                    return main.TRUE
-                elif re.search( negativePattern, output ):
-                    return main.FALSE
-            main.log.error( self.name + ": setTestRemove did not" +
-                            " match expected output" )
-            main.log.debug( self.name + " expected: " + pattern )
-            main.log.debug( self.name + " actual: " + repr( output ) )
-            return main.ERROR
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' commandr. " )
-            return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setTestGet( self, setName, values="" ):
-        """
-        CLI command to get the elements in a distributed set.
-        Required arguments:
-            setName - The name of the set to remove from.
-        Optional arguments:
-            values - The value(s) to check if in the set, space seperated.
-        returns:
-            main.ERROR on error OR
-            A list of elements in the set if no optional arguments are
-                supplied OR
-            A tuple containing the list then:
-                main.FALSE if the given values are not in the set OR
-                main.TRUE if the given values are in the set OR
-        """
-        try:
-            values = str( values ).strip()
-            setName = str( setName ).strip()
-            length = len( values.split() )
-            containsCheck = None
-            # Patterns to match
-            setPattern = "\[(.*)\]"
-            pattern = "Items in set " + setName + ":\n" + setPattern
-            containsTrue = "Set " + setName + " contains the value " + values
-            containsFalse = "Set " + setName + " did not contain the value " +\
-                            values
-            containsAllTrue = "Set " + setName + " contains the the subset " +\
-                              setPattern
-            containsAllFalse = "Set " + setName + " did not contain the the" +\
-                               " subset " + setPattern
-
-            cmdStr = "set-test-get "
-            cmdStr += setName + " " + values
-            output = self.sendline( cmdStr )
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-
-            if length == 0:
-                match = re.search( pattern, output )
-            else:  # if given values
-                if length == 1:  # Contains output
-                    patternTrue = pattern + "\n" + containsTrue
-                    patternFalse = pattern + "\n" + containsFalse
-                else:  # ContainsAll output
-                    patternTrue = pattern + "\n" + containsAllTrue
-                    patternFalse = pattern + "\n" + containsAllFalse
-                matchTrue = re.search( patternTrue, output )
-                matchFalse = re.search( patternFalse, output )
-                if matchTrue:
-                    containsCheck = main.TRUE
-                    match = matchTrue
-                elif matchFalse:
-                    containsCheck = main.FALSE
-                    match = matchFalse
-                else:
-                    main.log.error( self.name + " setTestGet did not match " +\
-                                    "expected output" )
-                    main.log.debug( self.name + " expected: " + pattern )
-                    main.log.debug( self.name + " actual: " + repr( output ) )
-                    match = None
-            if match:
-                setMatch = match.group( 1 )
-                if setMatch == '':
-                    setList = []
-                else:
-                    setList = setMatch.split( ", " )
-                if length > 0:
-                    return ( setList, containsCheck )
-                else:
-                    return setList
-            else:  # no match
-                main.log.error( self.name + ": setTestGet did not" +
-                                " match expected output" )
-                main.log.debug( self.name + " expected: " + pattern )
-                main.log.debug( self.name + " actual: " + repr( output ) )
-                return main.ERROR
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' command." )
-            return main.ERROR
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.ERROR
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setTestSize( self, setName ):
-        """
-        CLI command to get the elements in a distributed set.
-        Required arguments:
-            setName - The name of the set to remove from.
-        returns:
-            The integer value of the size returned or
-            None on error
-        """
-        try:
-            # TODO: Should this check against the number of elements returned
-            #       and then return true/false based on that?
-            setName = str( setName ).strip()
-            # Patterns to match
-            setPattern = "\[(.*)\]"
-            pattern = "There are (\d+) items in set " + setName + ":\n" +\
-                          setPattern
-            cmdStr = "set-test-get -s "
-            cmdStr += setName
-            output = self.sendline( cmdStr )
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-            match = re.search( pattern, output )
-            if match:
-                setSize = int( match.group( 1 ) )
-                setMatch = match.group( 2 )
-                if len( setMatch.split() ) == setSize:
-                    main.log.info( "The size returned by " + self.name +
-                                   " matches the number of elements in " +
-                                   "the returned set" )
-                else:
-                    main.log.error( "The size returned by " + self.name +
-                                    " does not match the number of " +
-                                    "elements in the returned set." )
-                return setSize
-            else:  # no match
-                main.log.error( self.name + ": setTestGet did not" +
-                                " match expected output" )
-                main.log.debug( self.name + " expected: " + pattern )
-                main.log.debug( self.name + " actual: " + repr( output ) )
-                return None
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def counters( self, jsonFormat=True ):
-        """
-        Command to list the various counters in the system.
-        returns:
-            if jsonFormat, a string of the json object returned by the cli
-            command
-            if not jsonFormat, the normal string output of the cli command
-            None on error
-        """
-        try:
-            counters = {}
-            cmdStr = "counters"
-            if jsonFormat:
-                cmdStr += " -j"
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-            return output
-        except AssertionError:
-            main.log.exception( "Error in processing 'counters' command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def counterTestAddAndGet( self, counter, delta=1, inMemory=False ):
-        """
-        CLI command to add a delta to then get a distributed counter.
-        Required arguments:
-            counter - The name of the counter to increment.
-        Optional arguments:
-            delta - The long to add to the counter
-            inMemory - use in memory map for the counter
-        returns:
-            integer value of the counter or
-            None on Error
-        """
-        try:
-            counter = str( counter )
-            delta = int( delta )
-            cmdStr = "counter-test-increment "
-            if inMemory:
-                cmdStr += "-i "
-            cmdStr += counter
-            if delta != 1:
-                cmdStr += " " + str( delta )
-            output = self.sendline( cmdStr )
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-            pattern = counter + " was updated to (-?\d+)"
-            match = re.search( pattern, output )
-            if match:
-                return int( match.group( 1 ) )
-            else:
-                main.log.error( self.name + ": counterTestAddAndGet did not" +
-                                " match expected output." )
-                main.log.debug( self.name + " expected: " + pattern )
-                main.log.debug( self.name + " actual: " + repr( output ) )
-                return None
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def counterTestGetAndAdd( self, counter, delta=1, inMemory=False ):
-        """
-        CLI command to get a distributed counter then add a delta to it.
-        Required arguments:
-            counter - The name of the counter to increment.
-        Optional arguments:
-            delta - The long to add to the counter
-            inMemory - use in memory map for the counter
-        returns:
-            integer value of the counter or
-            None on Error
-        """
-        try:
-            counter = str( counter )
-            delta = int( delta )
-            cmdStr = "counter-test-increment -g "
-            if inMemory:
-                cmdStr += "-i "
-            cmdStr += counter
-            if delta != 1:
-                cmdStr += " " + str( delta )
-            output = self.sendline( cmdStr )
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                retryTime = 30  # Conservative time, given by Madan
-                main.log.info( "Waiting " + str( retryTime ) +
-                               "seconds before retrying." )
-                time.sleep( retryTime )  # Due to change in mastership
-                output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            assert "Error executing command" not in output, output
-            main.log.info( self.name + ": " + output )
-            pattern = counter + " was updated to (-?\d+)"
-            match = re.search( pattern, output )
-            if match:
-                return int( match.group( 1 ) )
-            else:
-                main.log.error( self.name + ": counterTestGetAndAdd did not" +
-                                " match expected output." )
-                main.log.debug( self.name + " expected: " + pattern )
-                main.log.debug( self.name + " actual: " + repr( output ) )
-                return None
-        except AssertionError:
-            main.log.exception( "Error in processing '" + cmdStr + "' command." )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def summary( self, jsonFormat=True ):
-        """
-        Description: Execute summary command in onos
-        Returns: json object ( summary -j ), returns main.FALSE if there is
-        no output
-
-        """
-        try:
-            cmdStr = "summary"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            assert "Error:" not in handle, handle
-            if not handle:
-                main.log.error( self.name + ": There is no output in " +
-                                "summary command" )
-                return main.FALSE
-            return handle
-        except AssertionError:
-            main.log.exception( "{} Error in summary output:".format( self.name ) )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def transactionalMapGet( self, keyName, inMemory=False ):
-        """
-        CLI command to get the value of a key in a consistent map using
-        transactions. This a test function and can only get keys from the
-        test map hard coded into the cli command
-        Required arguments:
-            keyName - The name of the key to get
-        Optional arguments:
-            inMemory - use in memory map for the counter
-        returns:
-            The string value of the key or
-            None on Error
-        """
-        try:
-            keyName = str( keyName )
-            cmdStr = "transactional-map-test-get "
-            if inMemory:
-                cmdStr += "-i "
-            cmdStr += keyName
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                return None
-            pattern = "Key-value pair \(" + keyName + ", (?P<value>.+)\) found."
-            if "Key " + keyName + " not found." in output:
-                return None
-            else:
-                match = re.search( pattern, output )
-                if match:
-                    return match.groupdict()[ 'value' ]
-                else:
-                    main.log.error( self.name + ": transactionlMapGet did not" +
-                                    " match expected output." )
-                    main.log.debug( self.name + " expected: " + pattern )
-                    main.log.debug( self.name + " actual: " + repr( output ) )
-                    return None
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def transactionalMapPut( self, numKeys, value, inMemory=False ):
-        """
-        CLI command to put a value into 'numKeys' number of keys in a
-        consistent map using transactions. This a test function and can only
-        put into keys named 'Key#' of the test map hard coded into the cli command
-        Required arguments:
-            numKeys - Number of keys to add the value to
-            value - The string value to put into the keys
-        Optional arguments:
-            inMemory - use in memory map for the counter
-        returns:
-            A dictionary whose keys are the name of the keys put into the map
-            and the values of the keys are dictionaries whose key-values are
-            'value': value put into map and optionaly
-            'oldValue': Previous value in the key or
-            None on Error
-
-            Example output
-            { 'Key1': {'oldValue': 'oldTestValue', 'value': 'Testing'},
-              'Key2': {'value': 'Testing'} }
-        """
-        try:
-            numKeys = str( numKeys )
-            value = str( value )
-            cmdStr = "transactional-map-test-put "
-            if inMemory:
-                cmdStr += "-i "
-            cmdStr += numKeys + " " + value
-            output = self.sendline( cmdStr )
-            assert "Command not found:" not in output, output
-            try:
-                # TODO: Maybe make this less hardcoded
-                # ConsistentMap Exceptions
-                assert "org.onosproject.store.service" not in output
-                # Node not leader
-                assert "java.lang.IllegalStateException" not in output
-            except AssertionError:
-                main.log.error( "Error in processing '" + cmdStr + "' " +
-                                "command: " + str( output ) )
-                return None
-            newPattern = 'Created Key (?P<key>(\w)+) with value (?P<value>(.)+)\.'
-            updatedPattern = "Put (?P<value>(.)+) into key (?P<key>(\w)+)\. The old value was (?P<oldValue>(.)+)\."
-            results = {}
-            for line in output.splitlines():
-                new = re.search( newPattern, line )
-                updated = re.search( updatedPattern, line )
-                if new:
-                    results[ new.groupdict()[ 'key' ] ] = { 'value': new.groupdict()[ 'value' ] }
-                elif updated:
-                    results[ updated.groupdict()[ 'key' ] ] = { 'value': updated.groupdict()[ 'value' ],
-                                                                'oldValue': updated.groupdict()[ 'oldValue' ] }
-                else:
-                    main.log.error( self.name + ": transactionlMapGet did not" +
-                                    " match expected output." )
-                    main.log.debug( "{} expected: {!r} or {!r}".format( self.name,
-                                                                        newPattern,
-                                                                        updatedPattern ) )
-                    main.log.debug( self.name + " actual: " + repr( output ) )
-            return results
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def maps( self, jsonFormat=True ):
-        """
-        Description: Returns result of onos:maps
-        Optional:
-            * jsonFormat: enable json formatting of output
-        """
-        try:
-            cmdStr = "maps"
-            if jsonFormat:
-                cmdStr += " -j"
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            return handle
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def getSwController( self, uri, jsonFormat=True ):
-        """
-        Descrition: Gets the controller information from the device
-        """
-        try:
-            cmd = "device-controllers "
-            if jsonFormat:
-                cmd += "-j "
-            response = self.sendline( cmd + uri )
-            assert "Command not found:" not in response, response
-            return response
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def setSwController( self, uri, ip, proto="tcp", port="6653", jsonFormat=True ):
-        """
-        Descrition: sets the controller(s) for the specified device
-
-        Parameters:
-            Required: uri - String: The uri of the device(switch).
-                      ip - String or List: The ip address of the controller.
-                      This parameter can be formed in a couple of different ways.
-                        VALID:
-                        10.0.0.1 - just the ip address
-                        tcp:10.0.0.1 - the protocol and the ip address
-                        tcp:10.0.0.1:6653 - the protocol and port can be specified,
-                                            so that you can add controllers with different
-                                            protocols and ports
-                        INVALID:
-                        10.0.0.1:6653 - this is not supported by ONOS
-
-            Optional: proto - The type of connection e.g. tcp, ssl. If a list of ips are given
-                      port - The port number.
-                      jsonFormat - If set ONOS will output in json NOTE: This is currently not supported
-
-        Returns: main.TRUE if ONOS returns without any errors, otherwise returns main.FALSE
-        """
-        try:
-            cmd = "device-setcontrollers"
-
-            if jsonFormat:
-                cmd += " -j"
-            cmd += " " + uri
-            if isinstance( ip, str ):
-                ip = [ip]
-            for item in ip:
-                if ":" in item:
-                    sitem = item.split( ":" )
-                    if len(sitem) == 3:
-                        cmd += " " + item
-                    elif "." in sitem[1]:
-                        cmd += " {}:{}".format(item, port)
-                    else:
-                        main.log.error( "Malformed entry: " + item )
-                        raise TypeError
-                else:
-                    cmd += " {}:{}:{}".format( proto, item, port )
-            response = self.sendline( cmd )
-            assert "Command not found:" not in response, response
-            if "Error" in response:
-                main.log.error( response )
-                return main.FALSE
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def removeDevice( self, device ):
-        '''
-        Description:
-            Remove a device from ONOS by passing the uri of the device(s).
-        Parameters:
-            device - (str or list) the id or uri of the device ex. "of:0000000000000001"
-        Returns:
-            Returns main.FALSE if an exception is thrown or an error is present
-            in the response. Otherwise, returns main.TRUE.
-        NOTE:
-            If a host cannot be removed, then this function will return main.FALSE
-        '''
-        try:
-            if type( device ) is str:
-                device = list( device )
-
-            for d in device:
-                time.sleep( 1 )
-                response = self.sendline( "device-remove {}".format( d ) )
-                assert "Command not found:" not in response, response
-                if "Error" in response:
-                    main.log.warn( "Error for device: {}\nResponse: {}".format( d, response ) )
-                    return main.FALSE
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return main.FALSE
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def removeHost( self, host ):
-        '''
-        Description:
-            Remove a host from ONOS by passing the id of the host(s)
-        Parameters:
-            hostId - (str or list) the id or mac of the host ex. "00:00:00:00:00:01"
-        Returns:
-            Returns main.FALSE if an exception is thrown or an error is present
-            in the response. Otherwise, returns main.TRUE.
-        NOTE:
-            If a host cannot be removed, then this function will return main.FALSE
-        '''
-        try:
-            if type( host ) is str:
-                host = list( host )
-
-            for h in host:
-                time.sleep( 1 )
-                response = self.sendline( "host-remove {}".format( h ) )
-                assert "Command not found:" not in response, response
-                if "Error" in response:
-                    main.log.warn( "Error for host: {}\nResponse: {}".format( h, response ) )
-                    return main.FALSE
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def link( self, begin, end, state ):
-        '''
-        Description:
-            Bring link down or up in the null-provider.
-        params:
-            begin - (string) One end of a device or switch.
-            end - (string) the other end of the device or switch
-        returns:
-            main.TRUE if no exceptions were thrown and no Errors are
-            present in the resoponse. Otherwise, returns main.FALSE
-        '''
-        try:
-            cmd =  "null-link null:{} null:{} {}".format( begin, end, state )
-            response = self.sendline( cmd, showResponse=True )
-            assert "Command not found:" not in response, response
-            if "Error" in response or "Failure" in response:
-                main.log.error( response )
-                return main.FALSE
-            return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return main.FALSE
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def host_remove( self, hostid ):
-	try:
-	    cmdStr = "host-remove" + " " + hostid
-	    handle = self.sendline( cmdStr )
-	    assert "Command not found:" not in handle, handle
-	    return handle
-	except AssertionError:
-	    main.log.exception( "" )
-	    return None
-	except TypeError:
-	    main.log.exception( self.name + ": Object not as expected" )
-	    return None
-        except pexpect.EOF:
-	    main.log.error( self.name + ": EOF exception found" )
-	    main.log.error( self.name + ":    " + self.handle.before )
-	    main.cleanup()
-	    main.exit()
-	except Exception:
-	    main.log.exception( self.name + ": Uncaught exception!" )
-	    main.cleanup()
-	    main.exit()
-
-    def cordVtnSyncNeutronStates( self, endpoint, password, tenant = 'admin', user = 'admin'):
-        """
-        Syncs VTN network with neutron
-        Required:
-            * openstack endpoint
-            * openstack password
-        """
-        try:
-            cmdStr = 'cordvtn-sync-neutron-states {} {} {} {}'.format(endpoint, tenant, user, password)
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in syncing vtn information" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                main.log.info("CordVTN state synced")
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    def cordVtnNodeInit( self, host):
-        """
-        Syncs VTN nodes with neutron
-        Required:
-            * openstack node host
-        """
-        try:
-            cmdStr = 'cordvtn-node-init {}'.format(host)
-            handle = self.sendline( cmdStr )
-            assert "Command not found:" not in handle, handle
-            if re.search( "Error", handle ):
-                main.log.error( "Error in syncing vtn node information" )
-                main.log.error( handle )
-                return main.FALSE
-            else:
-                main.log.info("CordVTN node state synced")
-                return main.TRUE
-        except AssertionError:
-            main.log.exception( "" )
-            return None
-        except TypeError:
-            main.log.exception( self.name + ": Object not as expected" )
-            return None
-        except pexpect.EOF:
-            main.log.error( self.name + ": EOF exception found" )
-            main.log.error( self.name + ":    " + self.handle.before )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-if __name__ == '__main__':
-  onos_cli = OnosCliDriver(connect = False)
-  name = 'onos_cli'
-  user = 'onos'
-  passwd = 'rocks'
-  ip = '172.17.0.2'
-  options = { 'name': '{0}'.format(name), 'onosIp' : '{0}'.format(ip) }
-  onos_cli.connect(name = 'onoscli', user_name = user, pwd = passwd, ip_address = ip,
-                   port = '8101', options = options)
-  device_str = onos_cli.devices(jsonFormat = False)
-  print('Devices: %s' %device_str)
-  device_json = onos_cli.devices()
-  print('Device json: %s' %device_json)
-  routes_str = onos_cli.routes(jsonFormat = False)
-  print('Routes %s' %routes_str)
-  flows_json = onos_cli.flows(state = "ADDED")
-  print('Flows %s' %flows_json)
-  onos_cli.disconnect()
diff --git a/src/test/cli/utilities.py b/src/test/cli/utilities.py
deleted file mode 100644
index 705c5e1..0000000
--- a/src/test/cli/utilities.py
+++ /dev/null
@@ -1,380 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-'''
-Created on 23-Oct-2012
-
-@authors: Anil Kumar (anilkumar.s@paxterrasolutions.com),
-          Raghav Kashyap(raghavkashyap@paxterrasolutions.com)
-
-
-
-    TestON is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 2 of the License, or
-    (at your option) any later version.
-
-    TestON is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
-
-
-Utilities will take care about the basic functions like :
-   * Extended assertion,
-   * parse_args for key-value pair handling
-   * Parsing the params or topology file.
-
-'''
-import re
-from configobj import ConfigObj
-import ast
-import smtplib
-
-import email
-import os
-import email.mime.application
-import time
-import random
-from clicommon import *
-
-class Utilities:
-    '''
-       Utilities will take care about the basic functions like :
-       * Extended assertion,
-       * parse_args for key-value pair handling
-       * Parsing the params or topology file.
-    '''
-
-    def __init__(self):
-        self.wrapped = sys.modules[__name__]
-
-    def __getattr__(self, name):
-        '''
-        This will invoke, if the attribute wasn't found the usual ways.
-        Here it will look for assert_attribute and will execute when AttributeError occurs.
-        It will return the result of the assert_attribute.
-        '''
-        try:
-            return getattr(self.wrapped, name)
-        except AttributeError:
-            def assertHandling(**kwargs):
-                nameVar = re.match("^assert",name,flags=0)
-                matchVar = re.match("assert(_not_|_)(equals|matches|greater|lesser)",name,flags=0)
-                notVar = 0
-                operators = ""
-
-                try :
-                    if matchVar.group(1) == "_not_" and matchVar.group(2) :
-                        notVar = 1
-                        operators = matchVar.group(2)
-                    elif matchVar.group(1) == "_" and matchVar.group(2):
-                        operators = matchVar.group(2)
-                except AttributeError:
-                    if matchVar==None and nameVar:
-                        operators ='equals'
-                result = self._assert(NOT=notVar,operator=operators,**kwargs)
-                if result == main.TRUE:
-                    main.log.info("Assertion Passed")
-                    main.STEPRESULT = main.TRUE
-                elif result == main.FALSE:
-                    main.log.warn("Assertion Failed")
-                    main.STEPRESULT = main.FALSE
-                else:
-                    main.log.error("There is an Error in Assertion")
-                    main.STEPRESULT = main.ERROR
-                return result
-            return assertHandling
-
-    def _assert (self,**assertParam):
-        '''
-        It will take the arguments :
-        expect:'Expected output'
-        actual:'Actual output'
-        onpass:'Action or string to be triggered or displayed respectively when the assert passed'
-        onfail:'Action or string to be triggered or displayed respectively when the assert failed'
-        not:'optional argument to specify the negation of the each assertion type'
-        operator:'assertion type will be defined by using operator. Like equal , greater, lesser, matches.'
-
-        It will return the assertion result.
-
-        '''
-
-        arguments = self.parse_args(["EXPECT","ACTUAL","ONPASS","ONFAIL","NOT","OPERATOR"],**assertParam)
-
-        result = 0
-        valuetype = ''
-        operation = "not "+ str(arguments["OPERATOR"]) if arguments['NOT'] and arguments['NOT'] == 1 else arguments["OPERATOR"]
-        operators = {'equals':{'STR':'==','NUM':'=='}, 'matches' : '=~', 'greater':'>' ,'lesser':'<'}
-
-        expectMatch = re.match('^\s*[+-]?0(e0)?\s*$', str(arguments["EXPECT"]), re.I+re.M)
-        if not ((not expectMatch) and (arguments["EXPECT"]==0)):
-            valuetype = 'NUM'
-        else :
-            if arguments["OPERATOR"] == 'greater' or arguments["OPERATOR"] == 'lesser':
-                main.log.error("Numeric comparison on strings is not possibele")
-                return main.ERROR
-
-        valuetype = 'STR'
-        arguments["ACTUAL"] = str(arguments["ACTUAL"])
-        if arguments["OPERATOR"] != 'matches':
-            arguments["EXPECT"] = str(arguments["EXPECT"])
-
-        try :
-            opcode = operators[str(arguments["OPERATOR"])][valuetype] if arguments["OPERATOR"] == 'equals' else operators[str(arguments["OPERATOR"])]
-
-        except KeyError as e:
-            print "Key Error in assertion"
-            print e
-            return main.FALSE
-
-        if opcode == '=~':
-            try:
-                assert re.search(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
-                result = main.TRUE
-            except AssertionError:
-                try :
-                    assert re.match(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
-                    result = main.TRUE
-                except AssertionError:
-                    main.log.error("Assertion Failed")
-                    result = main.FALSE
-        else :
-            try:
-                if str(opcode)=="==":
-                    main.log.info("Verifying the Expected is equal to the actual or not using assert_equal")
-                    if (arguments["EXPECT"] == arguments["ACTUAL"]):
-                        result = main.TRUE
-                    else :
-                        result = main.FALSE
-                elif str(opcode) == ">":
-                    main.log.info("Verifying the Expected is Greater than the actual or not using assert_greater")
-                    if (ast.literal_eval(arguments["EXPECT"]) > ast.literal_eval(arguments["ACTUAL"])) :
-                        result = main.TRUE
-                    else :
-                        result = main.FALSE
-                elif str(opcode) == "<":
-                    main.log.info("Verifying the Expected is Lesser than the actual or not using assert_lesser")
-                    if (ast.literal_eval(arguments["EXPECT"]) < ast.literal_eval(arguments["ACTUAL"])):
-                        result = main.TRUE
-                    else :
-                        result = main.FALSE
-            except AssertionError:
-                main.log.error("Assertion Failed")
-                result = main.FALSE
-        result = result if result else 0
-        result = not result if arguments["NOT"] and arguments["NOT"] == 1 else result
-        resultString = ""
-        if result :
-            resultString = str(resultString) + "PASS"
-            main.log.info(arguments["ONPASS"])
-        else :
-            resultString = str(resultString) + "FAIL"
-            if not isinstance(arguments["ONFAIL"],str):
-                eval(str(arguments["ONFAIL"]))
-            else :
-                main.log.error(arguments["ONFAIL"])
-                main.log.report(arguments["ONFAIL"])
-                main.onFailMsg = arguments[ 'ONFAIL' ]
-
-        msg = arguments["ON" + str(resultString)]
-
-        if not isinstance(msg,str):
-            try:
-                eval(str(msg))
-            except SyntaxError as e:
-                print "function definition is not right"
-                print e
-
-        main.last_result = result
-        if main.stepResults[2]:
-            main.stepResults[2][-1] = result
-            try:
-                main.stepResults[3][-1] = arguments[ 'ONFAIL' ]
-            except AttributeError:
-                pass
-        else:
-            main.log.warn( "Assertion called before a test step" )
-        return result
-
-    def parse_args(self,args, **kwargs):
-        '''
-        It will accept the (key,value) pair and will return the (key,value) pairs with keys in uppercase.
-        '''
-        newArgs = {}
-        for key,value in kwargs.iteritems():
-            if isinstance(args,list) and str.upper(key) in args:
-                for each in args:
-                    if each==str.upper(key):
-                        newArgs [str(each)] = value
-                    elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
-                        newArgs[str(each)] = None
-
-        return newArgs
-
-    def send_mail(self):
-        # Create a text/plain message
-        msg = email.mime.Multipart.MIMEMultipart()
-        try :
-            if main.test_target:
-                sub = "Result summary of \"" + main.TEST + "\" run on component \"" +\
-                      main.test_target + "\" Version \"" +\
-                      vars( main )[main.test_target].get_version() + "\": " +\
-                      str( main.TOTAL_TC_SUCCESS ) + "% Passed"
-            else :
-                sub = "Result summary of \"" + main.TEST + "\": " +\
-                      str( main.TOTAL_TC_SUCCESS ) + "% Passed"
-        except ( KeyError, AttributeError ):
-            sub = "Result summary of \"" + main.TEST + "\": " +\
-                  str( main.TOTAL_TC_SUCCESS ) + "% Passed"
-
-        msg['Subject'] = sub
-        msg['From'] = main.sender
-        msg['To'] = main.mail
-
-        # The main body is just another attachment
-        body = email.mime.Text.MIMEText( main.logHeader + "\n" +
-                                         main.testResult)
-        msg.attach( body )
-
-        # Attachments
-        for filename in os.listdir( main.logdir ):
-            filepath = main.logdir + "/" + filename
-            fp = open( filepath, 'rb' )
-            att = email.mime.application.MIMEApplication( fp.read(),
-                                                          _subtype="" )
-            fp.close()
-            att.add_header( 'Content-Disposition',
-                            'attachment',
-                            filename=filename )
-            msg.attach( att )
-        try:
-            smtp = smtplib.SMTP( main.smtp )
-            smtp.starttls()
-            smtp.login( main.sender, main.senderPwd )
-            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
-            smtp.quit()
-        except Exception:
-            main.log.exception( "Error sending email" )
-        return main.TRUE
-
-    def send_warning_email( self, subject=None ):
-        try:
-            if not subject:
-                subject = main.TEST + " PAUSED!"
-            # Create a text/plain message
-            msg = email.mime.Multipart.MIMEMultipart()
-
-            msg['Subject'] = subject
-            msg['From'] = main.sender
-            msg['To'] = main.mail
-
-            smtp = smtplib.SMTP( main.smtp )
-            smtp.starttls()
-            smtp.login( main.sender, main.senderPwd )
-            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
-            smtp.quit()
-        except Exception:
-            main.log.exception( "" )
-            return main.FALSE
-        return main.TRUE
-
-    def parse(self,fileName):
-        '''
-        This will parse the params or topo or cfg file and return content in the file as Dictionary
-        '''
-        self.fileName = fileName
-        matchFileName = re.match(r'(.*)\.(cfg|params|topo)',self.fileName,re.M|re.I)
-        if matchFileName:
-            try :
-                parsedInfo = ConfigObj(self.fileName)
-                return parsedInfo
-            except StandardError:
-                print "There is no such file to parse "+fileName
-        else:
-            return 0
-
-    def retry( self, f, retValue, args=(), kwargs={},
-               sleep=1, attempts=2, randomTime=False ):
-        """
-        Given a function and bad return values, retry will retry a function
-        until successful or give up after a certain number of attempts.
-
-        Arguments:
-        f        - a callable object
-        retValue - Return value(s) of f to retry on. This can be a list or an
-                   object.
-        args     - A tuple containing the arguments of f.
-        kwargs   - A dictionary containing the keyword arguments of f.
-        sleep    - Time in seconds to sleep between retries. If random is True,
-                   this is the max time to wait. Defaults to 1 second.
-        attempts - Max number of attempts before returning. If set to 1,
-                   f will only be called once. Defaults to 2 trys.
-        random   - Boolean indicating if the wait time is random between 0
-                   and sleep or exactly sleep seconds. Defaults to False.
-        """
-        # TODO: be able to pass in a conditional statement(s). For example:
-        #      retCondition = "< 7"
-        #      Then we do something like 'if eval( "ret " + retCondition ):break'
-        try:
-            assert attempts > 0, "attempts must be more than 1"
-            assert sleep >= 0, "sleep must be >= 0"
-            if not isinstance( retValue, list ):
-                retValue = [ retValue ]
-            for i in range( 0, attempts ):
-                ret = f( *args, **kwargs )
-                if ret not in retValue:
-                # NOTE that False in [ 0 ] == True
-                    break
-                if randomTime:
-                    sleeptime = random.randint( 0, sleep )
-                else:
-                    sleeptime = sleep
-                time.sleep( sleeptime )
-            return ret
-        except AssertionError:
-            main.log.exception( "Invalid arguements for retry: " )
-            main.cleanup()
-            main.exit()
-        except Exception:
-            main.log.exception( "Uncaught exception in retry: " )
-            main.cleanup()
-            main.exit()
-
-utilities = Utilities()
-
-if __name__ != "__main__":
-    import sys
-
-    sys.modules[__name__] = Utilities()
diff --git a/src/test/cluster/__init__.py b/src/test/cluster/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/cluster/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/cluster/clusterTest.json b/src/test/cluster/clusterTest.json
deleted file mode 100644
index 0c05484..0000000
--- a/src/test/cluster/clusterTest.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "V_INF1" : "veth0",
-    "TLS_TIMEOUT" : 100,
-    "ITERATIONS" : 10,
-    "ARCHIVE_PARTITION" : false
-}
diff --git a/src/test/cluster/clusterTest.py b/src/test/cluster/clusterTest.py
deleted file mode 100644
index 481c7b4..0000000
--- a/src/test/cluster/clusterTest.py
+++ /dev/null
@@ -1,1876 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from scapy.all import *
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from CordTestUtils import get_mac, get_controller, get_controllers, log_test
-from OnosFlowCtrl import OnosFlowCtrl
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from onosclidriver import OnosCliDriver
-from CordContainer import Container, Onos, Quagga
-from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
-from portmaps import g_subscriber_port_map
-from scapy.all import *
-import time, monotonic
-import threading
-from threading import current_thread
-from Cluster import *
-from EapTLS import TLSAuthTest
-from ACL import ACLTest
-from OnosLog import OnosLog
-from CordLogger import CordLogger
-from CordTestConfig import setup_module, teardown_module
-import os
-import json
-import random
-import collections
-log_test.setLevel('INFO')
-
-class cluster_exchange(CordLogger):
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    mac = RandMAC()._fix()
-    flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
-    igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
-    igmp_ip = IP(dst = '224.0.0.22')
-    ONOS_INSTANCES = 3
-    V_INF1 = 'veth0'
-    TLS_TIMEOUT = 100
-    device_id = 'of:' + get_mac()
-    igmp = cluster_igmp()
-    igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
-    igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
-    tls = cluster_tls()
-    flows = cluster_flows()
-    proxyarp = cluster_proxyarp()
-    vrouter = cluster_vrouter()
-    acl = cluster_acl()
-    dhcprelay = cluster_dhcprelay()
-    subscriber = cluster_subscriber()
-    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
-                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')
-    ITERATIONS = int(os.getenv('ITERATIONS', 10))
-    ARCHIVE_PARTITION = False
-
-    def setUp(self):
-        if self._testMethodName not in self.testcaseLoggers:
-            super(cluster_exchange, self).setUp()
-
-    def tearDown(self):
-        if self._testMethodName not in self.testcaseLoggers:
-            super(cluster_exchange, self).tearDown()
-
-    def cliEnter(self, controller = None):
-        retries = 0
-        while retries < 30:
-            self.cli = OnosCliDriver(controller = controller, connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    def get_leader(self, controller = None):
-        self.cliEnter(controller = controller)
-        try:
-            result = json.loads(self.cli.leaders(jsonFormat = True))
-        except:
-            result = None
-
-        if result is None:
-            log_test.info('Leaders command failure for controller %s' %controller)
-        else:
-            log_test.info('Leaders returned: %s' %result)
-        self.cliExit()
-        return result
-
-    def onos_shutdown(self, controller = None):
-        status = True
-        self.cliEnter(controller = controller)
-        try:
-            self.cli.shutdown(timeout = 10)
-        except:
-            log_test.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
-            status = False
-
-        self.cliExit()
-        return status
-
-    def log_set(self, level = None, app = 'org.onosproject', controllers = None):
-        CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
-
-    def get_leaders(self, controller = None):
-        result_map = {}
-        if controller is None:
-            controller = get_controller()
-        if type(controller) in [ list, tuple ]:
-            for c in controller:
-                leaders = self.get_leader(controller = c)
-                result_map[c] = leaders
-        else:
-            leaders = self.get_leader(controller = controller)
-            result_map[controller] = leaders
-        return result_map
-
-    def verify_leaders(self, controller = None):
-        leaders_map = self.get_leaders(controller = controller)
-        failed = [ k for k,v in leaders_map.items() if v == None ]
-        return failed
-
-    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
-	tries = 0
-	try:
-            self.cliEnter(controller = controller)
-	    while tries <= 10:
-                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
-                if cluster_summary:
-	            log_test.info("cluster 'summary' command output is %s"%cluster_summary)
-		    nodes = cluster_summary['nodes']
-		    if verify:
-		        if nodes == onos_instances:
-		            self.cliExit()
-		            return True
-		        else:
-		            tries += 1
-		            time.sleep(1)
-		    else:
-			if nodes >= onos_instances:
-                            self.cliExit()
-                            return True
-                        else:
-                            tries += 1
-                            time.sleep(1)
-	        else:
-	            tries += 1
-	            time.sleep(1)
-	    self.cliExit()
-	    return False
-        except:
-            raise Exception('Failed to get cluster members')
-	    return False
-
-    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
-        tries = 0
-	cluster_ips = []
-        try:
-            self.cliEnter(controller = controller)
-            while tries <= 10:
-                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
-                if cluster_nodes:
-                    log_test.info("cluster 'nodes' output is %s"%cluster_nodes)
-                    if nodes_filter:
-                        cluster_nodes = nodes_filter(cluster_nodes)
-                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
-		    self.cliExit()
-                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
-		    return cluster_ips
-		else:
-		    tries += 1
-	    self.cliExit()
-	    return cluster_ips
-        except:
-            raise Exception('Failed to get cluster members')
-            return cluster_ips
-
-    def get_cluster_container_names_ips(self,controller=None):
-        onos_names_ips = {}
-        controllers = get_controllers()
-        i = 0
-        for controller in controllers:
-            if i == 0:
-                name = Onos.NAME
-            else:
-                name = '{}-{}'.format(Onos.NAME, i+1)
-            onos_names_ips[controller] = name
-            onos_names_ips[name] = controller
-            i += 1
-        return onos_names_ips
-        # onos_ips = self.get_cluster_current_member_ips(controller=controller)
-        # onos_names_ips[onos_ips[0]] = Onos.NAME
-        # onos_names_ips[Onos.NAME] = onos_ips[0]
-        # for i in range(1,len(onos_ips)):
-        #     name = '{0}-{1}'.format(Onos.NAME,i+1)
-        #     onos_names_ips[onos_ips[i]] = name
-        #     onos_names_ips[name] = onos_ips[i]
-
-        # return onos_names_ips
-
-    #identifying current master of a connected device, not tested
-    def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
-	master = None
-	standbys = []
-	tries = 0
-	try:
-	    cli = self.cliEnter(controller = controller)
-	    while tries <= 10:
-	        roles = json.loads(self.cli.roles(jsonFormat = True))
-	        log_test.info("cluster 'roles' command output is %s"%roles)
-	        if roles:
-	            for device in roles:
-	                log_test.info('Verifying device info in line %s'%device)
-	                if device['id'] == device_id:
-	                    master = str(device['master'])
-		            standbys = map(lambda d: str(d), device['standbys'])
-		            log_test.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
-			    self.cliExit()
-		            return master, standbys
-		    self.cliExit()
-		    return master, standbys
-	        else:
-		    tries += 1
-		    time.sleep(1)
-	    self.cliExit()
-	    return master,standbys
-	except:
-            raise Exception('Failed to get cluster members')
-	    return master,standbys
-
-    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
-	''' returns master and standbys of all the connected devices to ONOS cluster instance'''
-        device_dict = {}
-        tries = 0
-        try:
-            cli = self.cliEnter(controller = controller)
-            while tries <= 10:
-		device_dict = {}
-                roles = json.loads(self.cli.roles(jsonFormat = True))
-                log_test.info("cluster 'roles' command output is %s"%roles)
-                if roles:
-                    for device in roles:
-			device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
-                        for i in range(len(device_dict[device['id']]['standbys'])):
-			    device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
-                        log_test.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
-                    self.cliExit()
-                    return device_dict
-                else:
-                    tries += 1
-                    time.sleep(1)
-            self.cliExit()
-            return device_dict
-        except:
-            raise Exception('Failed to get cluster members')
-            return device_dict
-
-    #identify current master of a connected device, not tested
-    def get_cluster_connected_devices(self,controller=None):
-	'''returns all the devices connected to ONOS cluster'''
-        device_list = []
-        tries = 0
-        try:
-            cli = self.cliEnter(controller = controller)
-            while tries <= 10:
-		device_list = []
-                devices = json.loads(self.cli.devices(jsonFormat = True))
-                log_test.info("cluster 'devices' command output is %s"%devices)
-                if devices:
-                    for device in devices:
-			log_test.info('device id is %s'%device['id'])
-			device_list.append(str(device['id']))
-                    self.cliExit()
-                    return device_list
-                else:
-                    tries += 1
-                    time.sleep(1)
-            self.cliExit()
-            return device_list
-        except:
-            raise Exception('Failed to get cluster members')
-            return device_list
-
-    def get_number_of_devices_of_master(self,controller=None):
-	'''returns master-device pairs, which master having what devices'''
-	master_count = {}
-	try:
-	    cli = self.cliEnter(controller = controller)
-	    masters = json.loads(self.cli.masters(jsonFormat = True))
-	    if masters:
-		for master in masters:
-		    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
-		return master_count
-	    else:
-		return master_count
-	except:
-            raise Exception('Failed to get cluster members')
-            return master_count
-
-    def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
-	if new_master is None: return False
-	self.cliEnter(controller=controller)
-        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
-        command = self.cli.command(cmd = cmd, jsonFormat = False)
-        self.cliExit()
-        time.sleep(60)
-        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
-        assert_equal(master,new_master)
-	log_test.info('Cluster master changed to %s successfully'%new_master)
-
-    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
-	'''current master looses its mastership and hence new master will be elected'''
-        self.cliEnter(controller=controller)
-        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
-        command = self.cli.command(cmd = cmd, jsonFormat = False)
-        self.cliExit()
-        time.sleep(60)
-        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
-        assert_not_equal(new_master_ip,master_ip)
-	log_test.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
-	log_test.info('Cluster new master is %s'%new_master_ip)
-	return True
-
-    def cluster_controller_restarts(self, graceful = False):
-        controllers = get_controllers()
-        ctlr_len = len(controllers)
-        if ctlr_len <= 1:
-            log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
-            assert_greater(ctlr_len, 1)
-
-        #this call would verify the cluster for once
-        onos_map = self.get_cluster_container_names_ips()
-
-        def check_exception(iteration, controller = None):
-            adjacent_controller = None
-            adjacent_controllers = None
-            if controller:
-                adjacent_controllers = list(set(controllers) - set([controller]))
-                adjacent_controller = adjacent_controllers[0]
-            for node in controllers:
-                onosLog = OnosLog(host = node)
-                ##check the logs for storage exception
-                _, output = onosLog.get_log(('ERROR', 'Exception',))
-                if output and output.find('StorageException$Timeout') >= 0:
-                    log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
-                    log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    log_test.info('%s' %output)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    failed = self.verify_leaders(controllers)
-                    if failed:
-                        log_test.info('Leaders command failed on nodes: %s' %failed)
-                        log_test.error('Test failed on ITERATION %d' %iteration)
-                        CordLogger.archive_results(self._testMethodName,
-                                                   controllers = controllers,
-                                                   iteration = 'FAILED',
-                                                   archive_partition = self.ARCHIVE_PARTITION)
-                        assert_equal(len(failed), 0)
-                    return controller
-
-            try:
-                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
-                log_test.info('ONOS cluster formed with controllers: %s' %ips)
-                st = True
-            except:
-                st = False
-
-            failed = self.verify_leaders(controllers)
-            if failed:
-                log_test.error('Test failed on ITERATION %d' %iteration)
-                CordLogger.archive_results(self._testMethodName,
-                                           controllers = controllers,
-                                           iteration = 'FAILED',
-                                           archive_partition = self.ARCHIVE_PARTITION)
-            assert_equal(len(failed), 0)
-            if st is False:
-                log_test.info('No storage exception and ONOS cluster was not formed successfully')
-            else:
-                controller = None
-
-            return controller
-
-        next_controller = None
-        tries = self.ITERATIONS
-        for num in range(tries):
-            index = num % ctlr_len
-            #index = random.randrange(0, ctlr_len)
-            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
-            controller = onos_map[controller_name]
-            log_test.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
-            try:
-                #enable debug log for the other controllers before restarting this controller
-                adjacent_controllers = list( set(controllers) - set([controller]) )
-                self.log_set(controllers = adjacent_controllers)
-                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
-                if graceful is True:
-                    log_test.info('Gracefully shutting down controller: %s' %controller)
-                    self.onos_shutdown(controller)
-                cord_test_onos_restart(node = controller, timeout = 0)
-                self.log_set(controllers = controller)
-                self.log_set(app = 'io.atomix', controllers = controller)
-                time.sleep(60)
-            except:
-                time.sleep(5)
-                continue
-
-            #first archive the test case logs for this run
-            CordLogger.archive_results(self._testMethodName,
-                                       controllers = controllers,
-                                       iteration = 'iteration_{}'.format(num+1),
-                                       archive_partition = self.ARCHIVE_PARTITION)
-            next_controller = check_exception(num, controller = controller)
-
-    def test_cluster_controller_restarts(self):
-        '''Test the cluster by repeatedly killing the controllers'''
-        self.cluster_controller_restarts()
-
-    def test_cluster_graceful_controller_restarts(self):
-        '''Test the cluster by repeatedly restarting the controllers gracefully'''
-        self.cluster_controller_restarts(graceful = True)
-
-    def test_cluster_single_controller_restarts(self):
-        '''Test the cluster by repeatedly restarting the same controller'''
-        controllers = get_controllers()
-        ctlr_len = len(controllers)
-        if ctlr_len <= 1:
-            log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
-            assert_greater(ctlr_len, 1)
-
-        #this call would verify the cluster for once
-        onos_map = self.get_cluster_container_names_ips()
-
-        def check_exception(iteration, controller, inclusive = False):
-            adjacent_controllers = list(set(controllers) - set([controller]))
-            adjacent_controller = adjacent_controllers[0]
-            controller_list = adjacent_controllers if inclusive == False else controllers
-            storage_exceptions = []
-            for node in controller_list:
-                onosLog = OnosLog(host = node)
-                ##check the logs for storage exception
-                _, output = onosLog.get_log(('ERROR', 'Exception',))
-                if output and output.find('StorageException$Timeout') >= 0:
-                    log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
-                    log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    log_test.info('%s' %output)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    storage_exceptions.append(node)
-
-            failed = self.verify_leaders(controller_list)
-            if failed:
-                log_test.info('Leaders command failed on nodes: %s' %failed)
-                if storage_exceptions:
-                    log_test.info('Storage exception seen on nodes: %s' %storage_exceptions)
-                    log_test.error('Test failed on ITERATION %d' %iteration)
-                    CordLogger.archive_results('test_cluster_single_controller_restarts',
-                                               controllers = controllers,
-                                               iteration = 'FAILED',
-                                               archive_partition = self.ARCHIVE_PARTITION)
-                    assert_equal(len(failed), 0)
-                    return controller
-
-            for ctlr in controller_list:
-                ips = self.get_cluster_current_member_ips(controller = ctlr,
-                                                          nodes_filter = \
-                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
-                log_test.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
-                if controller in ips and inclusive is False:
-                    log_test.info('Controller %s still ACTIVE on Node %s after it was shutdown' %(controller, ctlr))
-                if controller not in ips and inclusive is True:
-                    log_test.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
-
-            return controller
-
-        tries = self.ITERATIONS
-        #chose a random controller for shutdown/restarts
-        controller = controllers[random.randrange(0, ctlr_len)]
-        controller_name = onos_map[controller]
-        ##enable the log level for the controllers
-        self.log_set(controllers = controllers)
-        self.log_set(app = 'io.atomix', controllers = controllers)
-        for num in range(tries):
-            log_test.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
-            try:
-                cord_test_onos_shutdown(node = controller)
-                time.sleep(20)
-            except:
-                time.sleep(5)
-                continue
-            #check for exceptions on the adjacent nodes
-            check_exception(num, controller)
-            #Now restart the controller back
-            log_test.info('Restarting back the controller %s' %controller_name)
-            cord_test_onos_restart(node = controller)
-            self.log_set(controllers = controller)
-            self.log_set(app = 'io.atomix', controllers = controller)
-            time.sleep(60)
-            #archive the logs for this run
-            CordLogger.archive_results('test_cluster_single_controller_restarts',
-                                       controllers = controllers,
-                                       iteration = 'iteration_{}'.format(num+1),
-                                       archive_partition = self.ARCHIVE_PARTITION)
-            check_exception(num, controller, inclusive = True)
-
-    def test_cluster_restarts(self):
-        '''Test the cluster by repeatedly restarting the entire cluster'''
-        controllers = get_controllers()
-        ctlr_len = len(controllers)
-        if ctlr_len <= 1:
-            log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
-            assert_greater(ctlr_len, 1)
-
-        #this call would verify the cluster for once
-        onos_map = self.get_cluster_container_names_ips()
-
-        def check_exception(iteration):
-            controller_list = controllers
-            storage_exceptions = []
-            for node in controller_list:
-                onosLog = OnosLog(host = node)
-                ##check the logs for storage exception
-                _, output = onosLog.get_log(('ERROR', 'Exception',))
-                if output and output.find('StorageException$Timeout') >= 0:
-                    log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
-                    log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    log_test.info('%s' %output)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    storage_exceptions.append(node)
-
-            failed = self.verify_leaders(controller_list)
-            if failed:
-                log_test.info('Leaders command failed on nodes: %s' %failed)
-                if storage_exceptions:
-                    log_test.info('Storage exception seen on nodes: %s' %storage_exceptions)
-                    log_test.error('Test failed on ITERATION %d' %iteration)
-                    CordLogger.archive_results('test_cluster_restarts',
-                                               controllers = controllers,
-                                               iteration = 'FAILED',
-                                               archive_partition = self.ARCHIVE_PARTITION)
-                    assert_equal(len(failed), 0)
-                    return
-
-            for ctlr in controller_list:
-                ips = self.get_cluster_current_member_ips(controller = ctlr,
-                                                          nodes_filter = \
-                                                          lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
-                log_test.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
-                if len(ips) != len(controllers):
-                    log_test.error('Test failed on ITERATION %d' %iteration)
-                    CordLogger.archive_results('test_cluster_restarts',
-                                               controllers = controllers,
-                                               iteration = 'FAILED',
-                                               archive_partition = self.ARCHIVE_PARTITION)
-                assert_equal(len(ips), len(controllers))
-
-        tries = self.ITERATIONS
-        for num in range(tries):
-            log_test.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
-            try:
-                cord_test_restart_cluster()
-                self.log_set(controllers = controllers)
-                self.log_set(app = 'io.atomix', controllers = controllers)
-                log_test.info('Delaying before verifying cluster status')
-                time.sleep(60)
-            except:
-                time.sleep(10)
-                continue
-
-            #archive the logs for this run before verification
-            CordLogger.archive_results('test_cluster_restarts',
-                                       controllers = controllers,
-                                       iteration = 'iteration_{}'.format(num+1),
-                                       archive_partition = self.ARCHIVE_PARTITION)
-            #check for exceptions on the adjacent nodes
-            check_exception(num)
-
-    #pass
-    def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances = onos_instances)
-	assert_equal(status, True)
-	log_test.info('Cluster exists with %d ONOS instances'%onos_instances)
-
-    #nottest cluster not coming up properly if member goes down
-    def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances = onos_instances)
-	assert_equal(status, True)
-        onos_ips = self.get_cluster_current_member_ips()
-	onos_instances = len(onos_ips)+add
-        log_test.info('Adding %d nodes to the ONOS cluster' %add)
-        cord_test_onos_add_cluster(count = add)
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-
-    def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        log_test.info('Removing cluster current master %s'%(master))
-        cord_test_onos_shutdown(node = master)
-        time.sleep(60)
-        onos_instances -= 1
-        status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
-        assert_equal(status, True)
-	new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
-	assert_not_equal(master,new_master)
-	log_test.info('Successfully removed clusters master instance')
-
-    def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[0]]
-	log_test.info('Removing cluster member %s'%standbys[0])
-        cord_test_onos_shutdown(node = standbys[0])
-	time.sleep(60)
-	onos_instances -= 1
-        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
-        assert_equal(status, True)
-
-    def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-       	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member1_onos_name = onos_names_ips[standbys[0]]
-        member2_onos_name = onos_names_ips[standbys[1]]
-        log_test.info('Removing cluster member %s'%standbys[0])
-        cord_test_onos_shutdown(node = standbys[0])
-        log_test.info('Removing cluster member %s'%standbys[1])
-        cord_test_onos_shutdown(node = standbys[1])
-        time.sleep(60)
-        onos_instances = onos_instances - 2
-        status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
-        assert_equal(status, True)
-
-    def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        for i in range(remove):
-	    member_onos_name = onos_names_ips[standbys[i]]
-            log_test.info('Removing onos container with name %s'%standbys[i])
-            cord_test_onos_shutdown(node = standbys[i])
-        time.sleep(60)
-        onos_instances = onos_instances - remove
-        status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
-        assert_equal(status, True)
-
-    #nottest test cluster not coming up properly if member goes down
-    def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        onos_ips = self.get_cluster_current_member_ips()
-        onos_instances = len(onos_ips)+add
-        log_test.info('Adding %d ONOS instances to the cluster'%add)
-        cord_test_onos_add_cluster(count = add)
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        log_test.info('Removing %d ONOS instances from the cluster'%remove)
-        for i in range(remove):
-            name = '{}-{}'.format(Onos.NAME, onos_instances - i)
-            log_test.info('Removing onos container with name %s'%name)
-            cord_test_onos_shutdown(node = name)
-        time.sleep(60)
-        onos_instances = onos_instances-remove
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-
-    #nottest cluster not coming up properly if member goes down
-    def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        onos_ips = self.get_cluster_current_member_ips()
-        onos_instances = onos_instances-remove
-        log_test.info('Removing %d ONOS instances from the cluster'%remove)
-        for i in range(remove):
-            name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
-            log_test.info('Removing onos container with name %s'%name)
-            cord_test_onos_shutdown(node = name)
-        time.sleep(60)
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        log_test.info('Adding %d ONOS instances to the cluster'%add)
-        cord_test_onos_add_cluster(count = add)
-        onos_instances = onos_instances+add
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-
-    def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	log_test.info('Restarting cluster')
-	cord_test_onos_restart()
-	status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-
-    def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        log_test.info('Restarting cluster master %s'%master)
-        cord_test_onos_restart(node = master)
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	log_test.info('Cluster came up after master restart as expected')
-
-    #test fail. master changing after restart. Need to check correct behavior.
-    def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        master1, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master1]
-        log_test.info('Restarting cluster master %s'%master1)
-        cord_test_onos_restart(node = master1)
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master2, standbys = self.get_cluster_current_master_standbys()
-	assert_equal(master1,master2)
-        log_test.info('Cluster master is same before and after cluster master restart as expected')
-
-    def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-	assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-	member_onos_name = onos_names_ips[standbys[0]]
-        log_test.info('Restarting cluster member %s'%standbys[0])
-        cord_test_onos_restart(node = standbys[0])
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	log_test.info('Cluster came up as expected after restarting one member')
-
-    def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member1_onos_name = onos_names_ips[standbys[0]]
-        member2_onos_name = onos_names_ips[standbys[1]]
-        log_test.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
-        cord_test_onos_restart(node = standbys[0])
-        cord_test_onos_restart(node = standbys[1])
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	log_test.info('Cluster came up as expected after restarting two members')
-
-    def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status,True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-	for i in range(members):
-            member_onos_name = onos_names_ips[standbys[i]]
-	    log_test.info('Restarting cluster member %s'%standbys[i])
-            cord_test_onos_restart(node = standbys[i])
-
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	log_test.info('Cluster came up as expected after restarting %d members'%members)
-
-    def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-	assert_equal(len(standbys),(onos_instances-1))
-        log_test.info('Cluster current master of devices is %s'%master)
-	self.change_master_current_cluster(new_master=standbys[0])
-        log_test.info('Cluster master changed successfully')
-
-    #tested on single onos setup.
-    def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	onos_ips = self.get_cluster_current_member_ips()
-        self.vrouter.setUpClass()
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True)
-        for onos_ip in onos_ips:
-            tries = 0
-            flag = False
-            try:
-                self.cliEnter(controller = onos_ip)
-                while tries <= 5:
-                    routes = json.loads(self.cli.routes(jsonFormat = True))
-                    if routes:
-                        assert_equal(len(routes['routes4']), networks)
-                        self.cliExit()
-                        flag = True
-                        break
-                    else:
-                        tries += 1
-                        time.sleep(1)
-                assert_equal(flag, True)
-            except:
-                log_test.info('Exception occured while checking routes in onos instance %s'%onos_ip)
-                raise
-
-    #tested on single onos setup.
-    def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        onos_ips = self.get_cluster_current_member_ips()
-	master, standbys = self.get_cluster_current_master_standbys()
-	onos_names_ips =  self.get_cluster_container_names_ips()
-	master_onos_name = onos_names_ips[master]
-        self.vrouter.setUpClass()
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-	assert_equal(res,True)
-        cord_test_onos_shutdown(node = master)
-	time.sleep(60)
-	log_test.info('Verifying vrouter traffic after cluster master is down')
-	self.vrouter.vrouter_traffic_verify()
-
-    #tested on single onos setup.
-    def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        onos_ips = self.get_cluster_current_member_ips()
-        master, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.vrouter.setUpClass()
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True)
-        cord_test_onos_restart()
-	self.vrouter.vrouter_traffic_verify()
-
-    #tested on single onos setup.
-    def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        self.vrouter.setUpClass()
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True)
-	self.vrouter.vrouter_activate(deactivate=True)
-        time.sleep(15)
-	self.vrouter.vrouter_traffic_verify(positive_test=False)
-	self.vrouter.vrouter_activate(deactivate=False)
-
-    #tested on single onos setup.
-    def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.vrouter.setUpClass()
-	log_test.info('Verifying vrouter before master down')
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True)
-	self.vrouter.vrouter_activate(deactivate=True)
-	log_test.info('Verifying vrouter traffic after app deactivated')
-        time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
-        self.vrouter.vrouter_traffic_verify(positive_test=False)
-	log_test.info('Verifying vrouter traffic after master down')
-        cord_test_onos_shutdown(node = master)
-	time.sleep(60)
-	self.vrouter.vrouter_traffic_verify(positive_test=False)
-        self.vrouter.vrouter_activate(deactivate=False)
-
-    #tested on single onos setup.
-    def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[0]]
-        self.vrouter.setUpClass()
-        log_test.info('Verifying vrouter before cluster member down')
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True) # Expecting vrouter should work properly
-        log_test.info('Verifying vrouter after cluster member down')
-        cord_test_onos_shutdown(node = standbys[0])
-	time.sleep(60)
-	self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
-
-    #tested on single onos setup.
-    def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[1]]
-        self.vrouter.setUpClass()
-        log_test.info('Verifying vrouter traffic before cluster member restart')
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True) # Expecting vrouter should work properly
-        cord_test_onos_restart(node = standbys[1])
-	log_test.info('Verifying vrouter traffic after cluster member restart')
-        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
-
-    #tested on single onos setup.
-    def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-        self.vrouter.setUpClass()
-        log_test.info('Verifying vrouter traffic before cluster restart')
-        res = self.vrouter.vrouter_network_verify(networks, peers = 1)
-        assert_equal(res, True) # Expecting vrouter should work properly
-	cord_test_onos_restart()
-        log_test.info('Verifying vrouter traffic after cluster restart')
-        self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
-
-
-    #test fails because flow state is in pending_add in onos
-    def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances = onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.flows.setUpClass()
-        egress = 1
-        ingress = 2
-        egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
-        ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress,
-                            ingressPort = ingress,
-                            udpSrc = ingress_map['udp_port'],
-                            udpDst = egress_map['udp_port'],
-			    controller=master
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-                self.success = True
-            sniff(timeout=2,
-             lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
-                                and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
-
-	for i in [0,1]:
-	    if i == 1:
-                cord_test_onos_shutdown(node = master)
-                log_test.info('Verifying flows traffic after master killed')
-                time.sleep(45)
-	    else:
-		log_test.info('Verifying flows traffic before master killed')
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-            L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
-            pkt = L2/L3/L4
-            log_test.info('Sending packets to verify if flows are correct')
-            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
-            t.join()
-            assert_equal(self.success, True)
-
-    def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        self.flows.setUpClass()
-        egress = 1
-        ingress = 2
-        egress_map = { 'ip': '192.168.30.1' }
-        ingress_map = { 'ip': '192.168.40.1' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress,
-                            ingressPort = ingress,
-                            ecn = 1,
-			    controller=master
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
-                        and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
-                                iface = self.flows.port_map[egress])
-	for i in [0,1]:
-	    if i == 1:
-		log_test.info('Changing cluster master to %s'%standbys[0])
-		self.change_master_current_cluster(new_master=standbys[0])
-		log_test.info('Verifying flow traffic after cluster master chnaged')
-	    else:
-		log_test.info('Verifying flow traffic  before cluster master changed')
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-            L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
-            pkt = L2/L3
-            log_test.info('Sending a packet to verify if flows are correct')
-            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
-            t.join()
-            assert_equal(self.success, True)
-
-    #pass
-    def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.flows.setUpClass()
-        egress = 1
-        ingress = 2
-        egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress,
-                            ingressPort = ingress,
-                            ipv6_extension = 0,
-			    controller=master
-                            )
-
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
-                self.success = True
-            sniff(timeout=2,count=5,
-                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
-	for i in [0,1]:
-	    if i == 1:
-		log_test.info('Restart cluster current master %s'%master)
-                Container(master_onos_name,Onos.IMAGE).restart()
-                time.sleep(45)
-	        log_test.info('Verifying flow traffic after master restart')
-	    else:
-		log_test.info('Verifying flow traffic before master restart')
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            L2 = self.flows_eth
-            L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
-            pkt = L2/L3
-            log_test.info('Sending packets to verify if flows are correct')
-            sendp(pkt, count=50, iface = self.flows.port_map[ingress])
-            t.join()
-            assert_equal(self.success, True)
-
-    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
-        dst_mac = self.igmp.iptomac(group)
-        eth = Ether(dst= dst_mac)
-        ip = IP(dst=group,src=source)
-        data = repr(monotonic.monotonic())
-        sendp(eth/ip/data,count=20, iface = intf)
-        pkt = (eth/ip/data)
-        log_test.info('multicast traffic packet %s'%pkt.show())
-
-    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
-        log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
-        self.success = False
-        def recv_task():
-            def igmp_recv_cb(pkt):
-                log_test.info('multicast data received for group %s from source %s'%(group,source))
-                self.success = True
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
-        t = threading.Thread(target = recv_task)
-        t.start()
-        self.send_multicast_data_traffic(group,source=source)
-        t.join()
-        return self.success
-
-    #pass
-    def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-	self.igmp.setUp(controller=master)
-        groups = ['224.2.3.4','230.5.6.7']
-        src_list = ['2.2.2.2','3.3.3.3']
-        self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
-        assert_equal(status,False)
-	log_test.info('restarting cluster master %s'%master)
-	Container(master_onos_name,Onos.IMAGE).restart()
-	time.sleep(60)
-	log_test.info('verifying multicast data traffic after master restart')
-	status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
-        assert_equal(status,False)
-
-    #pass
-    def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.igmp.setUp(controller=master)
-        groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
-        src_list = [self.igmp.randomsourceip()]
-        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
-        assert_equal(status,False)
-        log_test.info('Killing cluster master %s'%master)
-        Container(master_onos_name,Onos.IMAGE).kill()
-        time.sleep(60)
-	status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
-        assert_equal(status, True)
-        log_test.info('Verifying multicast data traffic after cluster master down')
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
-        assert_equal(status,False)
-
-    def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.igmp.setUp(controller=master)
-        groups = [self.igmp.random_mcast_ip()]
-        src_list = [self.igmp.randomsourceip()]
-        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        log_test.info('Killing clusters master %s'%master)
-        Container(master_onos_name,Onos.IMAGE).kill()
-	count = 0
-	for i in range(60):
-            log_test.info('Verifying multicast data traffic after cluster master down')
-            status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-	    if status:
-		break
-	    else:
-		count += 1
-	        time.sleep(1)
-	assert_equal(status, True)
-	log_test.info('Time taken to recover traffic after clusters master down is %d seconds'%count)
-
-
-    #pass
-    def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-	assert_equal(len(standbys), (onos_instances-1))
-        self.igmp.setUp(controller=master)
-        groups = [self.igmp.random_mcast_ip()]
-        src_list = [self.igmp.randomsourceip()]
-        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-	log_test.info('Changing cluster master %s to %s'%(master,standbys[0]))
-	self.change_cluster_current_master(new_master=standbys[0])
-	log_test.info('Verifying multicast traffic after cluster master change')
-	status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        log_test.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
-                             iface = self.V_INF1, delay = 1)
-	time.sleep(10)
-        status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
-        assert_equal(status,False)
-
-    #pass
-    def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-	assert_equal(len(standbys), (onos_instances-1))
-        self.igmp.setUp(controller=master)
-        groups = [self.igmp.random_mcast_ip()]
-        src_list = [self.igmp.randomsourceip()]
-        self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
-	log_test.info('Changing cluster master %s to %s'%(master,standbys[0]))
-	self.change_cluster_current_master(new_master = standbys[0])
-        self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-	time.sleep(1)
-	self.change_cluster_current_master(new_master = master)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-
-    #pass
-    @deferred(TLS_TIMEOUT)
-    def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-	self.tls.setUp(controller=master)
-        df = defer.Deferred()
-        def eap_tls_verify(df):
-            tls = TLSAuthTest()
-            tls.runTest()
-            df.callback(0)
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(120)
-    def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        self.tls.setUp()
-        df = defer.Deferred()
-	def eap_tls_verify2(df2):
-            tls = TLSAuthTest()
-            tls.runTest()
-            df.callback(0)
-        for i in [0,1]:
-	    if i == 1:
-		log_test.info('Changing cluster master %s to %s'%(master, standbys[0]))
-		self.change_master_current_cluster(new_master=standbys[0])
-                log_test.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
-	    else:
-		log_test.info('Verifying tls authentication before cluster master change')
-            reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.tls.setUp()
-        df = defer.Deferred()
-        def eap_tls_verify(df):
-            tls = TLSAuthTest()
-            tls.runTest()
-            df.callback(0)
-        for i in [0,1]:
-            if i == 1:
-                log_test.info('Killing cluster current master %s'%master)
-                cord_test_onos_shutdown(node = master)
-		time.sleep(20)
-                status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
-		assert_equal(status, True)
-		log_test.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
-                log_test.info('Verifying tls authentication after killing cluster master')
-            reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[0]]
-	self.tls.setUp()
-        df = defer.Deferred()
-        def eap_tls_no_cert(df):
-            def tls_no_cert_cb():
-                log_test.info('TLS authentication failed with no certificate')
-            tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-	for i in [0,1]:
-	    if i == 1:
-	        log_test.info('Restart cluster member %s'%standbys[0])
-                Container(member_onos_name,Onos.IMAGE).restart()
-                time.sleep(20)
-                status = self.verify_cluster_status(onos_instances=onos_instances)
-                assert_equal(status, True)
-                log_test.info('Cluster came up with %d instances after member restart'%(onos_instances))
-                log_test.info('Verifying tls authentication after member restart')
-        reactor.callLater(0, eap_tls_no_cert, df)
-        return df
-
-    #pass
-    def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status,True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	self.proxyarp.setUpClass()
-        ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
-        ingress = hosts+1
-        for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-	log_test.info('changing cluster current master from %s to %s'%(master,standbys[0]))
-	self.change_cluster_current_master(new_master=standbys[0])
-	log_test.info('verifying proxyarp after master change')
-	for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-        log_test.info('Deactivating proxyarp  app and expecting proxyarp functionality not to work')
-        self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
-	time.sleep(3)
-        for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
-            time.sleep(1)
-        log_test.info('activating proxyarp  app and expecting to get arp reply from ONOS')
-        self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
-	time.sleep(3)
-        for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-
-    #pass
-    def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[1]]
-	self.proxyarp.setUpClass()
-        ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
-        ingress = hosts+1
-        for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-	log_test.info('killing cluster member %s'%standbys[1])
-        Container(member_onos_name,Onos.IMAGE).kill()
-        time.sleep(20)
-        status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
-        assert_equal(status, True)
-        log_test.info('cluster came up with %d instances after member down'%(onos_instances-1))
-        log_test.info('verifying proxy arp functionality after cluster member down')
-	for hostip, hostmac in hosts_config:
-            self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-
-    #pass
-    def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	self.proxyarp.setUpClass()
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys), (onos_instances-1))
-        ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
-        self.success = True
-        ingress = hosts+1
-        ports = range(ingress,ingress+10)
-        hostmac = []
-        hostip = []
-        for ip,mac in hosts_config:
-            hostmac.append(mac)
-            hostip.append(ip)
-        success_dir = {}
-        def verify_proxyarp(*r):
-            ingress, hostmac, hostip = r[0],r[1],r[2]
-            def mac_recv_task():
-                def recv_cb(pkt):
-                    log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                    success_dir[current_thread().name] = True
-                sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
-                    prn = recv_cb, iface = self.proxyarp.port_map[ingress])
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
-            log_test.info('Sending arp request  for dest ip %s on interface %s' %
-                 (hostip,self.proxyarp.port_map[ingress]))
-            sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
-            t.join()
-        t = []
-        for i in range(10):
-            t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-            t[i].start()
-	time.sleep(2)
-        for i in range(10):
-            t[i].join()
-        if len(success_dir) != 10:
-                self.success = False
-        assert_equal(self.success, True)
-
-    #pass
-    def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	self.acl.setUp()
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result = acl_rule.get_acl_rules(controller=master)
-        aclRules1 = result.json()['aclRules']
-	log_test.info('Added acl rules is %s'%aclRules1)
-        acl_Id = map(lambda d: d['id'], aclRules1)
-	log_test.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
-	self.change_cluster_current_master(new_master=standbys[0])
-        status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    #pass
-    def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	onos_names_ips =  self.get_cluster_container_names_ips()
-	master_onos_name = onos_names_ips[master]
-        self.acl.setUp()
-        acl_rule = ACLTest()
-        status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        result1 = acl_rule.get_acl_rules(controller=master)
-        aclRules1 = result1.json()['aclRules']
-        log_test.info('Added acl rules is %s'%aclRules1)
-        acl_Id1 = map(lambda d: d['id'], aclRules1)
-        log_test.info('Killing cluster current master %s'%master)
-	Container(master_onos_name,Onos.IMAGE).kill()
-	time.sleep(45)
-	status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
-        assert_equal(status, True)
-        new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
-	assert_equal(len(standbys),(onos_instances-2))
-	assert_not_equal(new_master,master)
-        result2 = acl_rule.get_acl_rules(controller=new_master)
-        aclRules2 = result2.json()['aclRules']
-	acl_Id2 = map(lambda d: d['id'], aclRules2)
-	log_test.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
-	assert_equal(acl_Id2,acl_Id1)
-
-    #acl traffic scenario not working as acl rule is not getting added to onos
-    def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        member1_onos_name = onos_names_ips[standbys[0]]
-        member2_onos_name = onos_names_ips[standbys[1]]
-        ingress = self.acl.ingress_iface
-        egress = self.acl.CURRENT_PORT_NUM
-        acl_rule = ACLTest()
-        status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
-        self.acl.CURRENT_PORT_NUM += 1
-        time.sleep(5)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        srcMac = '00:00:00:00:00:11'
-        dstMac = host_ip_mac[0][1]
-        self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-        status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
-        time.sleep(10)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-	log_test.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
-        Container(member1_onos_name, Onos.IMAGE).kill()
-        Container(member2_onos_name, Onos.IMAGE).kill()
-	time.sleep(40)
-	status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
-        assert_equal(status, True)
-	self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
-        self.acl.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-
-    #pass
-    def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	self.dhcprelay.setUpClass(controller=master)
-        mac = self.dhcprelay.get_mac(iface)
-        self.dhcprelay.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.dhcprelay.default_config
-        options = self.dhcprelay.default_options
-        subnet = self.dhcprelay.default_subnet_config
-        dhcpd_interface_list = self.dhcprelay.relay_interfaces
-        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet,
-			 controller=master)
-        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-        cip, sip = self.dhcprelay.send_recv(mac)
-	log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-	self.change_master_current_cluster(new_master=standbys[0])
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        log_test.info('Triggering DHCP discover again after release')
-        cip2, sip2 = self.dhcprelay.send_recv(mac)
-        log_test.info('Verifying released IP was given back on rediscover')
-        assert_equal(cip, cip2)
-        log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-        assert_equal(self.dhcprelay.dhcp.release(cip2), True)
-	self.dhcprelay.tearDownClass(controller=standbys[0])
-
-
-    def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	onos_names_ips =  self.get_cluster_container_names_ips()
-        master_onos_name = onos_names_ips[master]
-        self.dhcprelay.setUpClass(controller=master)
-        mac = self.dhcprelay.get_mac(iface)
-        self.dhcprelay.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.dhcprelay.default_config
-        options = self.dhcprelay.default_options
-        subnet = self.dhcprelay.default_subnet_config
-        dhcpd_interface_list = self.dhcprelay.relay_interfaces
-        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet,
-			 controller=master)
-        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        log_test.info('Initiating dhcp process from client %s'%mac)
-        cip, sip = self.dhcprelay.send_recv(mac)
-        log_test.info('Killing cluster current master %s'%master)
-	Container(master_onos_name, Onos.IMAGE).kill()
-	time.sleep(60)
-	status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
-        assert_equal(status, True)
-	mac = self.dhcprelay.dhcp.get_mac(cip)[0]
-        log_test.info("Verifying dhcp clients gets same IP after cluster master restarts")
-        new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
-        assert_equal(new_cip, cip)
-	self.dhcprelay.tearDownClass(controller=standbys[0])
-
-    #pass
-    def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-	self.dhcprelay.setUpClass(controller=master)
-        macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
-        self.dhcprelay.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.dhcprelay.default_config
-        options = self.dhcprelay.default_options
-        subnet = self.dhcprelay.default_subnet_config
-        dhcpd_interface_list = self.dhcprelay.relay_interfaces
-        self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet,
-			 controller=master)
-        self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip1, sip1 = self.dhcprelay.send_recv(macs[0])
-	assert_not_equal(cip1,None)
-        log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
-        log_test.info('Changing cluster master from %s to %s'%(master, standbys[0]))
-	self.change_master_current_cluster(new_master=standbys[0])
-	cip2, sip2 = self.dhcprelay.send_recv(macs[1])
-	assert_not_equal(cip2,None)
-	log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
-	self.change_master_current_cluster(new_master=master)
-        log_test.info('Changing cluster master from %s to %s'%(standbys[0],master))
-        cip3, sip3 = self.dhcprelay.send_recv(macs[2])
-	assert_not_equal(cip3,None)
-	log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[2],master))
-	self.dhcprelay.tearDownClass(controller=standbys[0])
-
-    def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	self.subscriber.setUpClass(controller=master)
-        self.subscriber.num_subscribers = 5
-        self.subscriber.num_channels = 10
-	for i in [0,1]:
-	    if i == 1:
-		cord_test_onos_restart()
-		time.sleep(45)
-		status = self.verify_cluster_status(onos_instances=onos_instances)
-		assert_equal(status, True)
-		log_test.info('Verifying cord subscriber functionality after cluster restart')
-	    else:
-		log_test.info('Verifying cord subscriber functionality before cluster restart')
-            test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
-                                                    num_channels = self.subscriber.num_channels,
-                                                    cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
-                                                           self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
-                                                    port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
-                                                                                        self.subscriber.num_channels))
-            assert_equal(test_status, True)
-	self.subscriber.tearDownClass(controller=master)
-
-    #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
-    def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master,standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        self.subscriber.setUpClass(controller=master)
-        self.subscriber.num_subscribers = 5
-        self.subscriber.num_channels = 10
-        for i in [0,1]:
-            if i == 1:
-		status=self.withdraw_cluster_current_mastership(master_ip=master)
-		asser_equal(status, True)
-		master,standbys = self.get_cluster_current_master_standbys()
-                log_test.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
-            else:
-		 log_test.info('verifying cord subscriber functionality before cluster master withdraw mastership')
-            test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
-                                                    num_channels = self.subscriber.num_channels,
-                                                    cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
-                                                           self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
-                                                    port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
-                                                                                        self.subscriber.num_channels),controller=master)
-            assert_equal(test_status, True)
-        self.subscriber.tearDownClass(controller=master)
-
-    #not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
-    def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member_onos_name = onos_names_ips[standbys[0]]
-	self.subscriber.setUpClass(controller=master)
-	num_subscribers = 1
-        num_channels = 10
-	for i in [0,1]:
-	    if i == 1:
-                cord_test_onos_shutdown(node = standbys[0])
-		time.sleep(30)
-		status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
-                assert_equal(status, True)
-		log_test.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
-	    else:
-		log_test.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
-            test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
-                                                              self.subscriber.igmp_verify, self.subscriber.traffic_verify),
-                                                    port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all',controller=master)
-            assert_equal(test_status, True)
-	self.subscriber.tearDownClass(controller=master)
-
-    def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	master, standbys = self.get_cluster_current_master_standbys()
-        assert_equal(len(standbys),(onos_instances-1))
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        member1_onos_name = onos_names_ips[standbys[0]]
-	member2_onos_name = onos_names_ips[standbys[1]]
-	self.subscriber.setUpClass(controller=master)
-        num_subscribers = 1
-        num_channels = 10
-	for i in [0,1]:
-	    if i == 1:
-                cord_test_onos_shutdown(node = standbys[0])
-                cord_test_onos_shutdown(node = standbys[1])
-		time.sleep(60)
-		status = self.verify_cluster_status(onos_instances=onos_instances-2)
-                assert_equal(status, True)
-		log_test.info('Verifying cord subscriber funtionality after cluster two members %s and %s down'%(standbys[0],standbys[1]))
-	    else:
-		log_test.info('Verifying cord subscriber funtionality before cluster two members %s and %s down'%(standbys[0],standbys[1]))
-	    test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
-                                                           self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
-                                                    port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-            assert_equal(test_status, True)
-	self.subscriber.tearDownClass(controller=master)
-
-    #pass
-    def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
-	status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
-	for device in device_dict.keys():
-	    log_test.info("Device is %s"%device_dict[device])
-	    assert_not_equal(device_dict[device]['master'],'none')
-	    log_test.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
-	    assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
-
-    #pass
-    def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
-	cluster_ips = self.get_cluster_current_member_ips()
-	for ip in cluster_ips:
-	    device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
-	    assert_equal(len(device_dict.keys()),onos_instances)
-            for device in device_dict.keys():
-                log_test.info("Device is %s"%device_dict[device])
-                assert_not_equal(device_dict[device]['master'],'none')
-                log_test.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
-                assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
-
-    #pass
-    def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-	onos_names_ips =  self.get_cluster_container_names_ips()
-	master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information is %s'%master_count)
-	total_devices = 0
-	for master in master_count.keys():
-	    total_devices += master_count[master]['size']
-	    if master_count[master]['size'] != 0:
-		restart_ip = master
-	assert_equal(total_devices,onos_instances)
-	member_onos_name = onos_names_ips[restart_ip]
-	log_test.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
-        Container(member_onos_name, Onos.IMAGE).restart()
-	time.sleep(40)
-	master_count = self.get_number_of_devices_of_master()
-	log_test.info('Master count information after restart is %s'%master_count)
-	total_devices = 0
-        for master in master_count.keys():
-            total_devices += master_count[master]['size']
-	    if master == restart_ip:
-		assert_equal(master_count[master]['size'], 0)
-	assert_equal(total_devices,onos_instances)
-
-    #pass
-    def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        onos_names_ips =  self.get_cluster_container_names_ips()
-        master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += master_count[master]['size']
-            if master_count[master]['size'] != 0:
-                restart_ip = master
-        assert_equal(total_devices,onos_instances)
-        master_onos_name = onos_names_ips[restart_ip]
-        log_test.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
-        Container(master_onos_name, Onos.IMAGE).kill()
-        time.sleep(40)
-	for ip in onos_names_ips.keys():
-	    if ip != restart_ip:
-		controller_ip = ip
-	status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
-        assert_equal(status, True)
-        master_count = self.get_number_of_devices_of_master(controller=controller_ip)
-        log_test.info('Master count information after restart is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += master_count[master]['size']
-            if master == restart_ip:
-                assert_equal(master_count[master]['size'], 0)
-        assert_equal(total_devices,onos_instances)
-
-    #pass
-    def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += int(master_count[master]['size'])
-            if master_count[master]['size'] != 0:
-                master_ip = master
-		log_test.info('Devices of master %s are %s'%(master_count[master]['devices'],master))
-		device_id = str(master_count[master]['devices'][0])
-		device_count = master_count[master]['size']
-        assert_equal(total_devices,onos_instances)
-	log_test.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
-	status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
-        assert_equal(status, True)
-        master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information after cluster mastership withdraw is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += int(master_count[master]['size'])
-            if master == master_ip:
-                assert_equal(master_count[master]['size'], device_count-1)
-        assert_equal(total_devices,onos_instances)
-
-    #pass
-    def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += master_count[master]['size']
-        assert_equal(total_devices,onos_instances)
-        log_test.info('Restarting cluster')
-	cord_test_onos_restart()
-	time.sleep(60)
-        master_count = self.get_number_of_devices_of_master()
-        log_test.info('Master count information after restart is %s'%master_count)
-        total_devices = 0
-        for master in master_count.keys():
-            total_devices += master_count[master]['size']
-        assert_equal(total_devices,onos_instances)
diff --git a/src/test/cordSubscriber/__init__.py b/src/test/cordSubscriber/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/cordSubscriber/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/cordSubscriber/cordSubscriberTest.json b/src/test/cordSubscriber/cordSubscriberTest.json
deleted file mode 100644
index 5f1bf21..0000000
--- a/src/test/cordSubscriber/cordSubscriberTest.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-    "VOLTHA_HOST": "172.17.0.1", 
-    "VOLTHA_REST_PORT": 8882, 
-    "VOLTHA_IGMP_ITERATIONS": 100, 
-    "VOLTHA_CONFIG_FAKE": true, 
-    "VOLTHA_OLT_TYPE": "ponsim_olt", 
-    "VOLTHA_OLT_MAC": "00:0c:e2:31:12:00", 
-    "VOLTHA_UPLINK_VLAN_START": 333, 
-    "VOLTHA_TEARDOWN": false
-}
\ No newline at end of file
diff --git a/src/test/cordSubscriber/cordSubscriberTest.py b/src/test/cordSubscriber/cordSubscriberTest.py
deleted file mode 100644
index 4fcd590..0000000
--- a/src/test/cordSubscriber/cordSubscriberTest.py
+++ /dev/null
@@ -1,3119 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from scapy.all import IP
-import time, monotonic
-import os, sys
-import tempfile
-import random
-import threading
-import json
-import requests
-from Stats import Stats
-from OnosCtrl import OnosCtrl
-from DHCP import DHCPTest
-from EapTLS import TLSAuthTest
-from Channels import Channels, IgmpChannel
-from subscriberDb import SubscriberDB
-from threadPool import ThreadPool
-from portmaps import g_subscriber_port_map
-from OltConfig import *
-from CordTestServer import cord_test_onos_restart, cord_test_shell, cord_test_radius_restart
-from CordTestUtils import log_test, get_controller
-from CordLogger import CordLogger
-from CordTestConfig import setup_module, teardown_module
-from CordContainer import Onos
-from VolthaCtrl import VolthaCtrl
-from CordTestUtils import get_mac, get_controller
-
-log_test.setLevel('INFO')
-
-class Subscriber(Channels):
-      PORT_TX_DEFAULT = 2
-      PORT_RX_DEFAULT = 1
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      STATS_RX = 0
-      STATS_TX = 1
-      STATS_JOIN = 2
-      STATS_LEAVE = 3
-      SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
-
-      def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
-                   num = 1, channel_start = 0,
-                   tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
-                   iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
-                   mcast_cb = None, loginType = 'wireless'):
-            self.tx_port = tx_port
-            self.rx_port = rx_port
-            self.port_map = port_map or g_subscriber_port_map
-            try:
-                  self.tx_intf = self.port_map[tx_port]
-                  self.rx_intf = self.port_map[rx_port]
-            except:
-                  self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-                  self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-
-            log_test.info('Subscriber %s, rx interface %s, uplink interface %s' %(name, self.rx_intf, self.tx_intf))
-            Channels.__init__(self, num, channel_start = channel_start,
-                              iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
-            self.name = name
-            self.service = service
-            self.service_map = {}
-            services = self.service.strip().split(' ')
-            for s in services:
-                  self.service_map[s] = True
-            self.loginType = loginType
-            ##start streaming channels
-            self.join_map = {}
-            ##accumulated join recv stats
-            self.join_rx_stats = Stats()
-            self.recv_timeout = False
-
-      def has_service(self, service):
-            if self.service_map.has_key(service):
-                  return self.service_map[service]
-            if self.service_map.has_key(service.upper()):
-                  return self.service_map[service.upper()]
-            return False
-
-      def channel_join_update(self, chan, join_time):
-            self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
-            self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
-
-      def channel_join(self, chan = 0, delay = 2):
-            '''Join a channel and create a send/recv stats map'''
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.delay = delay
-            chan, join_time = self.join(chan)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_join_next(self, delay = 2, leave_flag = True):
-            '''Joins the next channel leaving the last channel'''
-            if self.last_chan:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.join_next(leave_flag = leave_flag)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_jump(self, delay = 2):
-            '''Jumps randomly to the next channel leaving the last channel'''
-            if self.last_chan is not None:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.jump()
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_leave(self, chan = 0, force = False):
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.leave(chan, force = force)
-
-      def channel_update(self, chan, stats_type, packets, t=0):
-            if type(chan) == type(0):
-                  chan_list = (chan,)
-            else:
-                  chan_list = chan
-            for c in chan_list:
-                  if self.join_map.has_key(c):
-                        self.join_map[c][stats_type].update(packets = packets, t = t)
-
-      def channel_receive(self, chan, cb = None, count = 1, timeout = 5):
-            log_test.info('Subscriber %s on port %s receiving from group %s, channel %d' %
-                     (self.name, self.rx_intf, self.gaddr(chan), chan))
-            r = self.recv(chan, cb = cb, count = count, timeout = timeout)
-            if len(r) == 0:
-                  log_test.info('Subscriber %s on port %s timed out' %(self.name, self.rx_intf))
-            else:
-                  log_test.info('Subscriber %s on port %s received %d packets' %(self.name, self.rx_intf, len(r)))
-            if self.recv_timeout:
-                  ##Negative test case is disabled for now
-                  assert_equal(len(r), 0)
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            log_test.info('Packet received for group %s, subscriber %s, port %s' %
-                     (pkt[IP].dst, self.name, self.rx_intf))
-            if self.recv_timeout:
-                  return
-            chan = self.caddr(pkt[IP].dst)
-            assert_equal(chan in self.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.join_map[chan][self.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.channel_update(chan, self.STATS_RX, 1, t = delta)
-            log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-
-class subscriber_pool:
-
-      def __init__(self, subscriber, test_cbs):
-            self.subscriber = subscriber
-            self.test_cbs = test_cbs
-
-      def pool_cb(self):
-            for cb in self.test_cbs:
-                  if cb:
-                        self.test_status = cb(self.subscriber)
-                        if self.test_status is not True:
-                           ## This is chaning for other sub status has to check again
-                           self.test_status = True
-                           log_test.info('This service is failed and other services will not run for this subscriber')
-                           break
-            log_test.info('This Subscriber is tested for multiple service eligibility ')
-            self.test_status = True
-
-
-class subscriber_exchange(CordLogger):
-
-      apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
-      olt_apps = () #'org.opencord.cordmcast')
-      vtn_app = 'org.opencord.vtn'
-      table_app = 'org.ciena.cordigmp'
-      dhcp_server_config = {
-        "ip": "10.1.11.50",
-        "mac": "ca:fe:ca:fe:ca:fe",
-        "subnet": "255.255.252.0",
-        "broadcast": "10.1.11.255",
-        "router": "10.1.8.1",
-        "domain": "8.8.8.8",
-        "ttl": "63",
-        "delay": "2",
-        "startip": "10.1.11.51",
-        "endip": "10.1.11.100"
-      }
-
-      aaa_loaded = False
-      test_path = os.path.dirname(os.path.realpath(__file__))
-      table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
-      app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
-      olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-1.2-SNAPSHOT.oar')
-      app_files = [os.path.join(test_path, '..', 'apps/cord-config-3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/olt-app-3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/mcast-1.3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/onos-app-igmpproxy-1.1.0-SNAPSHOT.oar')]
-      proxy_config_file = os.path.join(test_path, '..', 'igmpproxy/igmpproxyconfig.json')
-      olt_app_name = 'org.onosproject.olt'
-      onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-      olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-      cpqd_path = os.path.join(test_path, '..', 'setup')
-      ovs_path = cpqd_path
-      test_services = ('IGMP', 'TRAFFIC')
-      num_joins = 0
-      num_subscribers = 0
-      leave_flag = True
-      num_channels = 0
-      recv_timeout = False
-      onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      SUBSCRIBER_TIMEOUT = 300
-      MAX_PORTS = 100
-      proxy_device_id = OnosCtrl.get_device_id()
-      controller = get_controller()
-      proxy_app = 'org.opencord.igmpproxy'
-      mcast_app = 'org.opencord.mcast'
-      cord_config_app = 'org.opencord.config'
-      host_ip_map = {}
-      configs = {}
-      proxy_interfaces_last = ()
-      interface_to_mac_map = {}
-
-
-      CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-      CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-
-      VOLTHA_HOST = None
-      VOLTHA_TEARDOWN = True
-      VOLTHA_REST_PORT = VolthaCtrl.REST_PORT
-      VOLTHA_UPLINK_VLAN_MAP = { 'of:0001000000000001' : '222' }
-      VOLTHA_UPLINK_VLAN_START = 333
-      VOLTHA_IGMP_ITERATIONS = 100
-      VOLTHA_CONFIG_FAKE = True
-      VOLTHA_OLT_TYPE = 'simulated_olt'
-      VOLTHA_OLT_MAC = '00:0c:e2:31:12:00'
-      VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-      voltha_ctrl = None
-      voltha_device = None
-      voltha_switch_map = None
-      voltha_preconfigured = False
-
-      @classmethod
-      def update_apps_version(cls):
-            version = Onos.getVersion()
-            major = int(version.split('.')[0])
-            minor = int(version.split('.')[1])
-            cordigmp_app_version = '2.0-SNAPSHOT'
-            olt_app_version = '1.2-SNAPSHOT'
-	    cord_config_app_version = '1.2-SNAPSHOT'
-            if major > 1:
-                  cordigmp_app_version = '3.0-SNAPSHOT'
-                  olt_app_version = '2.0-SNAPSHOT'
-		  cord_config_app_version = '2.0-SNAPSHOT'
-            elif major == 1:
-                  if minor > 10:
-                        cordigmp_app_version = '3.0-SNAPSHOT'
-                        olt_app_version = '2.0-SNAPSHOT'
-                  elif minor <= 8:
-                        olt_app_version = '1.1-SNAPSHOT'
-	    cls.cord_config_app_file = os.path.join(cls.test_path, '..', 'apps/cord-config-{}.oar'.format(cord_config_app_version))
-            cls.app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-{}.oar'.format(cordigmp_app_version))
-            cls.table_app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-multitable-{}.oar'.format(cordigmp_app_version))
-            cls.olt_app_file = os.path.join(cls.test_path, '..', 'apps/olt-app-{}.oar'.format(olt_app_version))
-	    cls.updated_app_files = [cls.cord_config_app_file,cls.app_file,cls.table_app_file,cls.olt_app_file]
-
-      @classmethod
-      def load_device_id(cls):
-            '''Configure the device id'''
-            did = OnosCtrl.get_device_id()
-            #Set the default config
-            cls.device_id = did
-            cls.device_dict = { "devices" : {
-                        "{}".format(did) : {
-                              "basic" : {
-                                    "driver" : "voltha"
-                                    }
-                              }
-                        },
-                  }
-            return did
-
-      @classmethod
-      def setUpClass(cls):
-          '''Load the OLT config and activate relevant apps'''
-          cls.update_apps_version()
-          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-          if cls.VOLTHA_ENABLED is False:
-                OnosCtrl.config_device_driver()
-                OnosCtrl.cord_olt_config(cls.olt)
-          cls.port_map, cls.port_list = cls.olt.olt_port_map()
-          cls.switches = cls.port_map['switches']
-          cls.num_ports = cls.port_map['num_ports']
-          if cls.num_ports > 1:
-                cls.num_ports -= 1 ##account for the tx port
-          cls.activate_apps(cls.apps + cls.olt_apps, deactivate = True)
-
-      @classmethod
-      def tearDownClass(cls):
-          '''Deactivate the olt apps and restart OVS back'''
-          apps = cls.olt_apps
-          for app in apps:
-              onos_ctrl = OnosCtrl(app)
-              onos_ctrl.deactivate()
-          if cls.VOLTHA_ENABLED is False:
-                OnosCtrl.config_device_driver(driver = 'ovs')
-	  #cls.igmp_proxy_teardown()
-	  #Onos.install_cord_apps()
-	  #OnosCtrl.install_app(cls.table_app_file,onos_ip=cls.controller)
-
-      @classmethod
-      def activate_apps(cls, apps, deactivate = False):
-            for app in apps:
-                  onos_ctrl = OnosCtrl(app)
-                  if deactivate is True:
-                        onos_ctrl.deactivate()
-                        time.sleep(2)
-                  status, _ = onos_ctrl.activate()
-                  assert_equal(status, True)
-                  time.sleep(2)
-
-      @classmethod
-	  log_test.info('In igmp proxy setup function ***************')
-      def igmpproxy_setup(cls,FastLeave='false'):
-          cls.uninstall_cord_config_app()
-          time.sleep(1)
-          cls.install_igmpproxy()
-          cls.igmp_proxy_setup()
-          cls.onos_igmp_proxy_config_load(FastLeave=FastLeave)
-
-      @classmethod
-      def igmp_proxy_teardown(cls):
-          ##reset the ONOS port configuration back to default
-          for config in cls.configs.items():
-              OnosCtrl.delete(config)
-          cls.uninstall_cord_config_app()
-	  Onos.install_cord_apps()
-	  OnosCtrl.install_app(cls.table_app_file,onos_ip=cls.controller)
-          #for app_file in cls.updated_app_files:
-          #OnosCtrl.install_app(cls.table_app_file)
-          #OnosCtrl.install_app(table_app_file)
-          #OnosCtrl.install_app(olt_app_file)
-
-      @classmethod
-      def uninstall_cord_config_app(cls):
-          log_test.info('Uninstalling org.opencord.config 1.2 version app')
-          OnosCtrl(cls.cord_config_app).deactivate()
-          OnosCtrl.uninstall_app(cls.cord_config_app, onos_ip = cls.controller)
-
-      @classmethod
-      def install_igmpproxy(cls):
-	  log_test.info('In install igmp proxy function ***************')
-          for app in cls.app_files:
-              OnosCtrl.install_app(app, onos_ip = cls.controller)
-              OnosCtrl(app).activate()
-
-      @classmethod
-      def igmp_proxy_setup(cls):
-          did =  OnosCtrl.get_device_id()
-          cls.proxy_device_id = did
-          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-          cls.port_map, _ = cls.olt.olt_port_map()
-          #log_test.info('port map is %s'%cls.port_map)
-          if cls.port_map:
-              ##Per subscriber, we use 1 relay port
-              try:
-                  proxy_port = cls.port_map[cls.port_map['relay_ports'][0]]
-              except:
-                  proxy_port = cls.port_map['uplink']
-              cls.proxy_interface_port = proxy_port
-              cls.proxy_interfaces = (cls.port_map[cls.proxy_interface_port],)
-          else:
-              cls.proxy_interface_port = 100
-              cls.proxy_interfaces = (g_subscriber_port_map[cls.proxy_interface_port],)
-          cls.proxy_interfaces_last = cls.proxy_interfaces
-          if cls.port_map:
-              ##generate a ip/mac client virtual interface config for onos
-              interface_list = []
-              for port in cls.port_map['ports']:
-                  port_num = cls.port_map[port]
-                  if port_num == cls.port_map['uplink']:
-                      continue
-                  ip = cls.get_host_ip(port_num)
-                  mac = cls.get_mac(port)
-                  interface_list.append((port_num, ip, mac))
-              #configure igmp proxy  virtual interface
-              proxy_ip = cls.get_host_ip(interface_list[0][0])
-              proxy_mac = cls.get_mac(cls.port_map[cls.proxy_interface_port])
-              interface_list.append((cls.proxy_interface_port, proxy_ip, proxy_mac))
-              cls.onos_interface_load(interface_list)
-
-      @classmethod
-      def onos_interface_load(cls, interface_list):
-          interface_dict = { 'ports': {} }
-          for port_num, ip, mac in interface_list:
-              port_map = interface_dict['ports']
-              port = '{}/{}'.format(cls.proxy_device_id, port_num)
-              port_map[port] = { 'interfaces': [] }
-              interface_list = port_map[port]['interfaces']
-              interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-              interface_list.append(interface_map)
-          #cls.onos_load_config(interface_dict)
-          cls.configs['interface_config'] = interface_dict
-
-
-      @classmethod
-      def onos_igmp_proxy_config_load(cls, FastLeave = "false"):
-          #cls.proxy_interface_port = 12
-          proxy_connect_point = '{}/{}'.format(cls.proxy_device_id, cls.proxy_interface_port)
-          igmpproxy_dict = { "apps": {
-                "org.onosproject.provider.lldp": {
-                        "suppression": {
-                                "deviceTypes": ["ROADM"],
-                                "annotation": "{\"no-lldp\":null}"
-                        }
-                },
-                "org.opencord.igmpproxy": {
-                        "igmpproxy": {
-                                "globalConnectPointMode": "true",
-                                "globalConnectPoint": proxy_connect_point,
-                                "UnsolicitedTimeOut": "2",
-                                "MaxResp": "10",
-                                "KeepAliveInterval": "120",
-                                "KeepAliveCount": "3",
-                                "LastQueryInterval": "2",
-                                "LastQueryCount": "2",
-                                "FastLeave": FastLeave,
-                                "PeriodicQuery": "true",
-                                "IgmpCos": "7",
-                                "withRAUpLink": "true",
-                                "withRADownLink": "true"
-                        }
-                },
-                "org.opencord.mcast": {
-                        "multicast": {
-                                "ingressVlan": "222",
-                                "egressVlan": "17"
-                        }
-                }
-             }
-          }
-          device_dict = {'devices':{
-                           cls.proxy_device_id: {
-                               'basic': {
-                                   'driver': 'default'
-                                },
-                                'accessDevice': {
-                                   'uplink': '2',
-                                   'vlan': '222',
-                                   'defaultVlan': '1'
-                                   }
-                                }
-                            }
-                      }
-          log_test.info('Igmp proxy dict is %s'%igmpproxy_dict)
-          cls.onos_load_config("org.opencord.igmpproxy",igmpproxy_dict)
-          cls.onos_load_config("org.opencord.igmpproxy",device_dict)
-          cls.configs['relay_config'] = igmpproxy_dict
-          cls.configs['device_config'] = device_dict
-
-      def random_mcast_ip(self,start_ip = '224.1.1.1', end_ip = '224.1.254.254'):
-          start = list(map(int, start_ip.split(".")))
-          end = list(map(int, end_ip.split(".")))
-          temp = start
-          ip_range = []
-          ip_range.append(start_ip)
-          while temp != end:
-              start[3] += 1
-              for i in (3, 2, 1):
-                  if temp[i] == 255:
-                      temp[i] = 0
-                      temp[i-1] += 1
-              ip_range.append(".".join(map(str, temp)))
-          return random.choice(ip_range)
-
-      def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-          start = list(map(int, start_ip.split(".")))
-          end = list(map(int, end_ip.split(".")))
-          temp = start
-          ip_range = []
-          ip_range.append(start_ip)
-          while temp != end:
-              start[3] += 1
-              for i in (3, 2, 1):
-                  if temp[i] == 255:
-                      temp[i] = 0
-                      temp[i-1] += 1
-              ip_range.append(".".join(map(str, temp)))
-          return random.choice(ip_range)
-
-
-      @classmethod
-      def get_host_ip(cls, port):
-          if cls.host_ip_map.has_key(port):
-              return cls.host_ip_map[port]
-          cls.host_ip_map[port] = '192.168.1.{}'.format(port)
-          return cls.host_ip_map[port]
-
-      @classmethod
-      def get_mac(cls, iface):
-          if cls.interface_to_mac_map.has_key(iface):
-              return cls.interface_to_mac_map[iface]
-          mac = get_mac(iface, pad = 0)
-          cls.interface_to_mac_map[iface] = mac
-          return mac
-
-      @classmethod
-      def start_onos(cls, network_cfg = None):
-            if cls.onos_restartable is False:
-                  log_test.info('ONOS restart is disabled. Skipping ONOS restart')
-                  return
-            if cls.VOLTHA_ENABLED is True:
-                  log_test.info('ONOS restart skipped as VOLTHA is running')
-                  return
-            if network_cfg is None:
-                  network_cfg = cls.device_dict
-
-            if type(network_cfg) is tuple:
-                  res = []
-                  for v in network_cfg:
-                        res += v.items()
-                  config = dict(res)
-            else:
-                  config = network_cfg
-            log_test.info('Restarting ONOS with new network configuration')
-            return cord_test_onos_restart(config = config)
-
-      @classmethod
-      def remove_onos_config(cls):
-            try:
-                  os.unlink('{}/network-cfg.json'.format(cls.onos_config_path))
-            except: pass
-
-      @classmethod
-      def start_cpqd(cls, mac = '00:11:22:33:44:55'):
-            dpid = mac.replace(':', '')
-            cpqd_file = os.sep.join( (cls.cpqd_path, 'cpqd.sh') )
-            cpqd_cmd = '{} {}'.format(cpqd_file, dpid)
-            ret = os.system(cpqd_cmd)
-            assert_equal(ret, 0)
-            time.sleep(10)
-            device_id = 'of:{}{}'.format('0'*4, dpid)
-            return device_id
-
-      @classmethod
-      def start_ovs(cls):
-            ovs_file = os.sep.join( (cls.ovs_path, 'of-bridge.sh') )
-            ret = os.system(ovs_file)
-            assert_equal(ret, 0)
-            time.sleep(30)
-
-      @classmethod
-      def ovs_cleanup(cls):
-            ##For every test case, delete all the OVS groups
-            cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
-            try:
-                  cord_test_shell(cmd)
-                  ##Since olt config is used for this test, we just fire a careless local cmd as well
-                  os.system(cmd)
-            finally:
-                  return
-
-      @classmethod
-      def onos_aaa_load(cls):
-            if cls.aaa_loaded:
-                  return
-            aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
-                                                                    'radiusIp': '172.17.0.2' } } } }
-            radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
-            aaa_dict['apps']['org.opencord.aaa']['AAA']['radiusIp'] = radius_ip
-            cls.onos_load_config('org.opencord.aaa', aaa_dict)
-            cls.aaa_loaded = True
-
-      @classmethod
-      def onos_dhcp_table_load(cls, config = None):
-          dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(cls.dhcp_server_config) } } }
-          dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
-          if config:
-              for k in config.keys():
-                  if dhcp_config.has_key(k):
-                      dhcp_config[k] = config[k]
-          cls.onos_load_config('org.onosproject.dhcp', dhcp_dict)
-
-      @classmethod
-      def onos_load_config(cls, app, config):
-          status, code = OnosCtrl.config(config)
-          if status is False:
-             log_test.info('JSON config request for app %s returned status %d' %(app, code))
-             assert_equal(status, True)
-          time.sleep(2)
-
-      def dhcp_sndrcv(self, dhcp, update_seed = False):
-            cip, sip = dhcp.discover(update_seed = update_seed)
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-            log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                     (cip, sip, dhcp.get_mac(cip)[0]))
-            return cip,sip
-
-      def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
-            config = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
-                      'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                      'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-            self.onos_dhcp_table_load(config)
-            dhcp = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
-            cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed)
-            return cip, sip
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            chan = self.subscriber.caddr(pkt[IP].dst)
-            assert_equal(chan in self.subscriber.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
-            log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-            self.test_status = True
-
-      def traffic_verify(self, subscriber):
-            if subscriber.has_service('TRAFFIC'):
-                  url = 'http://www.google.com'
-                  resp = requests.get(url)
-                  self.test_status = resp.ok
-                  if resp.ok == False:
-                        log_test.info('Subscriber %s failed get from url %s with status code %d'
-                                 %(subscriber.name, url, resp.status_code))
-                  else:
-                        log_test.info('GET request from %s succeeded for subscriber %s'
-                                 %(url, subscriber.name))
-                  return self.test_status
-
-      def tls_verify(self, subscriber):
-            def tls_fail_cb():
-                  log_test.info('TLS verification failed')
-            if subscriber.has_service('TLS'):
-                  #OnosCtrl('org.opencord.aaa').deactivate()
-                  #time.sleep(2)
-                  #OnosCtrl('org.opencord.aaa').activate()
-                  #time.sleep(5)
-                  tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = subscriber.rx_intf)
-                  log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-                  tls.runTest()
-                  assert_equal(tls.failTest, False)
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, update_seed = True)
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_jump_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_next_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_verify(self, subscriber):
-            chan = 0
-            if subscriber.has_service('IGMP'):
-                  ##We wait for all the subscribers to join before triggering leaves
-                  if subscriber.rx_port > 1:
-                        time.sleep(5)
-                  subscriber.channel_join(chan, delay = 0)
-                  self.num_joins += 1
-                  while self.num_joins < self.num_subscribers:
-                        time.sleep(5)
-                  log_test.info('All subscribers have joined the channel')
-                  for i in range(10):
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        log_test.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_leave(chan)
-                        time.sleep(5)
-                        log_test.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
-                        #Should not receive packets for this subscriber
-                        self.recv_timeout = True
-                        subscriber.recv_timeout = True
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        subscriber.recv_timeout = False
-                        self.recv_timeout = False
-                        log_test.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_join(chan, delay = 0)
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_jump_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        log_test.info('Subscriber %s jumping channel' %subscriber.name)
-                        chan = subscriber.channel_jump(delay=0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_next_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        if i:
-                              chan = subscriber.channel_join_next(delay=0, leave_flag = self.leave_flag)
-                        else:
-                              chan = subscriber.channel_join(i, delay=0)
-                        log_test.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-                        log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log_test.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def voltha_igmp_next_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for c in xrange(self.VOLTHA_IGMP_ITERATIONS):
-                        for i in xrange(subscriber.num):
-                              if i:
-                                    chan = subscriber.channel_join_next(delay=0, leave_flag = self.leave_flag)
-                                    time.sleep(0.2)
-                              else:
-                                    chan = subscriber.channel_join(i, delay=0)
-                                    time.sleep(0.2)
-                                    if subscriber.num == 1:
-                                          subscriber.channel_leave(chan)
-                              log_test.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
-                              #subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-                              #log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_leave_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for chan in xrange(subscriber.num):
-                        subscriber.channel_leave(chan, force = True)
-                        time.sleep(2)
-                        log_test.info('Left channel %d for subscriber %s' %(chan, subscriber.name))
-                        #self.recv_timeout = True
-                        #subscriber.recv_timeout = True
-                        #subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-                        #self.recv_timeout = False
-                        #subscriber.recv_timeout = False
-                        #log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        #time.sleep(1)
-
-                  self.test_status = True
-                  return self.test_status
-
-      def generate_port_list(self, subscribers, channels):
-            return self.port_list[:subscribers]
-
-      def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = [], services = None):
-            '''Load the subscriber from the database'''
-            test_services = services if services else self.test_services
-            self.subscriber_db = SubscriberDB(create = create, services = test_services)
-            if create is True:
-                  self.subscriber_db.generate(num)
-            self.subscriber_info = self.subscriber_db.read(num)
-            self.subscriber_list = []
-            if not port_list:
-                  port_list = self.generate_port_list(num, num_channels)
-
-            index = 0
-            for info in self.subscriber_info:
-                  self.subscriber_list.append(Subscriber(name=info['Name'],
-                                                         service=info['Service'],
-                                                         port_map = self.port_map,
-                                                         num=num_channels,
-                                                         channel_start = channel_start,
-                                                         tx_port = port_list[index][0],
-                                                         rx_port = port_list[index][1]))
-                  if num_channels > 1:
-                        channel_start += num_channels
-                  index += 1
-
-            #load the ssm list for all subscriber channels
-            igmpChannel = IgmpChannel()
-            ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
-            ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
-            igmpChannel.igmp_load_ssm_config(ssm_list)
-
-      def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
-                                  channel_start = 0, cbs = None, port_list = [],
-                                  services = None, negative_subscriber_auth = None):
-          self.test_status = False
-          self.ovs_cleanup()
-          subscribers_count = num_subscribers
-          sub_loop_count =  num_subscribers
-          self.subscriber_load(create = True, num = num_subscribers,
-                               num_channels = num_channels, channel_start = channel_start, port_list = port_list,
-                               services = services)
-          self.onos_aaa_load()
-          self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
-
-          chan_leave = False #for single channel, multiple subscribers
-          if cbs is None:
-                cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                chan_leave = True
-          cbs_negative = cbs
-          for subscriber in self.subscriber_list:
-                if services and 'IGMP' in services:
-                   subscriber.start()
-                if negative_subscriber_auth is 'half' and sub_loop_count%2 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                elif negative_subscriber_auth is 'onethird' and sub_loop_count%3 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                else:
-                   cbs = cbs_negative
-                sub_loop_count = sub_loop_count - 1
-                pool_object = subscriber_pool(subscriber, cbs)
-                self.thread_pool.addTask(pool_object.pool_cb)
-          self.thread_pool.cleanUpThreads()
-          for subscriber in self.subscriber_list:
-                if services and 'IGMP' in services:
-                   subscriber.stop()
-                if chan_leave is True:
-                      subscriber.channel_leave(0)
-          subscribers_count = 0
-          return self.test_status
-
-      def tls_invalid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_no_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = '')
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_self_signed_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_non_ca_authrized_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_NON_CA_AUTHORIZED)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_Nsubscribers_use_same_valid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             num_users = 3
-             for i in xrange(num_users):
-                 tls = TLSAuthTest(intf = 'veth{}'.format(i*2))
-                 tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def dhcp_discover_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-             t1 = self.subscriber_dhcp_1release()
-             self.test_status = True
-             return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_1release(self, iface = INTF_RX_DEFAULT):
-            config = {'startip':'10.10.100.20', 'endip':'10.10.100.21',
-                      'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
-                      'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
-            self.onos_dhcp_table_load(config)
-            self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-            cip, sip = self.send_recv()
-            log_test.info('Releasing ip %s to server %s' %(cip, sip))
-            assert_equal(self.dhcp.release(cip), True)
-            log_test.info('Triggering DHCP discover again after release')
-            cip2, sip2 = self.send_recv(update_seed = True)
-            log_test.info('Verifying released IP was given back on rediscover')
-            assert_equal(cip, cip2)
-            log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-            assert_equal(self.dhcp.release(cip2), True)
-
-      def dhcp_client_reboot_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_request_after_reboot()
-                  self.test_status = True
-                  return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_request_after_reboot(self, iface = INTF_RX_DEFAULT):
-          #''' Client sends DHCP Request after reboot.'''
-
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                os.system('ifconfig '+iface+' down')
-                log_test.info('Client goes down.')
-                log_test.info('Delay for 5 seconds.')
-
-                time.sleep(5)
-
-                os.system('ifconfig '+iface+' up')
-                log_test.info('Client is up now.')
-
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                elif new_cip != None:
-                        log_test.info("Got DHCP ACK.")
-
-      def dhcp_client_renew_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_renew_time()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_renew_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log_test.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-                if new_cip and new_sip and lval:
-                        log_test.info("Client 's Renewal time is :%s",lval)
-                        log_test.info("Generating delay till renewal time.")
-                        time.sleep(lval)
-                        log_test.info("Client Sending Unicast DHCP request.")
-                        latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-                        if latest_cip and latest_sip:
-                                log_test.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                (latest_cip, mac, latest_sip) )
-
-                        elif latest_cip == None:
-                                log_test.info("Got DHCP NAK. Lease not renewed.")
-                elif new_cip == None or new_sip == None or lval == None:
-                        log_test.info("Got DHCP NAK.")
-
-      def dhcp_server_reboot_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_server_after_reboot()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_server_after_reboot(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP server goes down.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                log_test.info('Getting DHCP server Down.')
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                onos_ctrl.deactivate()
-                for i in range(0,4):
-                        log_test.info("Sending DHCP Request.")
-                        log_test.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log_test.info('')
-                                log_test.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log_test.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-                log_test.info('Getting DHCP server Up.')
-#               self.activate_apps(self.dhcp_app)
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                status, _ = onos_ctrl.activate()
-                assert_equal(status, True)
-                time.sleep(3)
-                for i in range(0,4):
-                        log_test.info("Sending DHCP Request after DHCP server is up.")
-                        log_test.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log_test.info('')
-                                log_test.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log_test.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-
-      def dhcp_client_rebind_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_rebind_time()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_rebind_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log_test.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-                if new_cip and new_sip and lval:
-                        log_test.info("Client 's Rebind time is :%s",lval)
-                        log_test.info("Generating delay till rebind time.")
-                        time.sleep(lval)
-                        log_test.info("Client Sending broadcast DHCP requests for renewing lease or for getting new ip.")
-                        self.dhcp.after_T2 = True
-                        for i in range(0,4):
-                                latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-                                if latest_cip and latest_sip:
-                                        log_test.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                        (latest_cip, mac, latest_sip) )
-                                        break
-                                elif latest_cip == None:
-                                        log_test.info("Got DHCP NAK. Lease not renewed.")
-                        assert_not_equal(latest_cip, None)
-                elif new_cip == None or new_sip == None or lval == None:
-                        log_test.info("Got DHCP NAK.Lease not Renewed.")
-
-      def dhcp_starvation_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_starvation()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_starvation(self, iface = INTF_RX_DEFAULT):
-          '''DHCP starve'''
-          config = {'startip':'182.17.0.20', 'endip':'182.17.0.69',
-                    'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-          log_test.info('Verifying 1 ')
-          for x in xrange(50):
-              mac = RandMAC()._fix()
-              self.send_recv(mac = mac)
-          log_test.info('Verifying 2 ')
-          cip, sip = self.send_recv(update_seed = True, validate = False)
-          assert_equal(cip, None)
-          assert_equal(sip, None)
-
-      def dhcp_same_client_multi_discovers_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_same_client_multiple_discover()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_same_client_multiple_discover(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple discover . '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-                  (cip, sip, mac) )
-          log_test.info('Triggering DHCP discover again.')
-          new_cip, new_sip, new_mac , lval = self.dhcp.only_discover()
-          if cip == new_cip:
-                 log_test.info('Got same ip for 2nd DHCP discover for client IP %s from server %s for mac %s. Triggering DHCP Request. '
-                          % (new_cip, new_sip, new_mac) )
-          elif cip != new_cip:
-                log_test.info('Ip after 1st discover %s' %cip)
-                log_test.info('Map after 2nd discover %s' %new_cip)
-                assert_equal(cip, new_cip)
-
-      def dhcp_same_client_multi_request_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_same_client_multiple_request()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_same_client_multiple_request(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple repeat DHCP requests. '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          log_test.info('Sending DHCP discover and DHCP request.')
-          cip, sip = self.send_recv()
-          mac = self.dhcp.get_mac(cip)[0]
-          log_test.info("Sending DHCP request again.")
-          new_cip, new_sip = self.dhcp.only_request(cip, mac)
-          if (new_cip,new_sip) == (cip,sip):
-                log_test.info('Got same ip for 2nd DHCP Request for client IP %s from server %s for mac %s.'
-                          % (new_cip, new_sip, mac) )
-          elif (new_cip,new_sip):
-                log_test.info('No DHCP ACK')
-                assert_equal(new_cip, None)
-                assert_equal(new_sip, None)
-          else:
-                print "Something went wrong."
-
-      def dhcp_client_desired_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_desired_address()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_desired_address(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.31', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover(desired = True)
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-          elif cip != self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log_test.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_equal(cip, self.dhcp.seed_ip)
-
-      def dhcp_client_request_pkt_with_non_offered_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_server_nak_packet()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_server_nak_packet(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_equal(new_cip, None)  #Negative Test Case
-
-      def dhcp_client_requested_out_pool_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_desired_address_out_of_pool()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_desired_address_out_of_pool(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address from out of pool.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover(desired = True)
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-                assert_equal(cip, self.dhcp.seed_ip) #Negative Test Case
-
-          elif cip != self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log_test.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_not_equal(cip, self.dhcp.seed_ip)
-
-          elif cip == None:
-                log_test.info('Got DHCP NAK')
-
-      def dhcp_client_specific_lease_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_specific_lease_packet()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_specific_lease_packet(self, iface = INTF_RX_DEFAULT):
-          ''' Client sends DHCP Discover packet for particular lease time.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          log_test.info('Sending DHCP discover with lease time of 700')
-          cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True)
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif lval != 700:
-                log_test.info('Getting dhcp client IP %s from server %s for mac %s with lease time %s. That is not 700.' %
-                         (cip, sip, mac, lval) )
-                assert_not_equal(lval, 700)
-
-      def test_cord_subscriber_for_joining_channel_and_receiving_stream(self):
-          """Test subscriber join and receive for channel surfing"""
-          self.num_subscribers = 5
-          self.num_channels = 1
-	  self.igmpproxy_setup()
-          test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-	  test_status = True
-          assert_equal(test_status, True)
-	  self.igmp_proxy_teardown()
-
-      def test_cord_subscriber_for_joining_channel_validating_stream_and_jumping_channel(self):
-          """Test subscriber join jump for channel surfing"""
-          self.num_subscribers = self.num_ports * len(self.switches)
-          self.num_channels = 10
-          test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                           self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_for_joining_channel_validating_stream_and_joining_next_channel(self):
-          """Test subscriber join next for channel surfing"""
-          self.num_subscribers = self.num_ports * len(self.switches)
-          self.num_channels = 10
-          test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_for_joining_channel_validating_stream_and_joining_next_channel_without_leave_for_last_channel(self):
-          """Test subscriber join next for channel surfing"""
-          self.num_subscribers = self.num_ports * len(self.switches)
-          self.num_channels = 5
-          self.leave_flag = False
-          test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-          self.leave_flag = True
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_for_channel_leave(self):
-          """Test subscriber leaves for all the join nexts before"""
-          self.num_subscribers = self.num_ports * len(self.switches)
-          self.num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_leave_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-          assert_equal(test_status, True)
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      def test_cord_subscriber_authentication_with_invalid_certificate_validating_channel_surfing(self):
-          ### """Test subscriber to auth with invalidCertification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_invalid_cert, self.dhcp_verify,
-                                                                   self.igmp_verify, self.traffic_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                  negative_subscriber_auth = 'all')
-              assert_equal(test_status, False)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      def test_cord_subscriber_authentication_with_no_certificate_validating_channel_surfing(self):
-          ### """Test subscriber to auth with No Certification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify,
-                                                               self.igmp_verify, self.traffic_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'all')
-              assert_equal(test_status, False)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-      def test_cord_subscriber_authentication_with_self_signed_certificate_validating_channel_surfing(self):
-          ### """Test subscriber to auth with Self Signed Certification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                           num_channels = num_channels,
-                                           cbs = (self.tls_self_signed_cert, self.dhcp_verify,
-                                                          self.igmp_verify, self.traffic_verify),
-                                           port_list = self.generate_port_list(num_subscribers, num_channels),
-                                           negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_2_cord_subscribers_authentication_with_valid_and_invalid_certificates_validating_channel_surfing(self):
-          ### """Test 2 subscribers to auth, one of the subscriber with invalidCertification and join channel"""
-          num_subscribers = 2
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_invalid_cert, self.dhcp_verify,self.igmp_verify, self.traffic_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                  negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_2_cord_subscribers_authentication_with_valid_and_no_certificate_scenario_validating_channel_surfing(self):
-          ### """Test 2 subscribers to auth, one of the subscriber with No Certification and join channel"""
-          num_subscribers = 2
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify,
-                                                                self.igmp_verify, self.traffic_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_2_cord_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_validating_channel_surfing(self):
-          ### """Test 2 subscribers to auth, one of the subscriber with Non CA authorized Certificate and join channel"""
-          num_subscribers = 2
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                 num_channels = num_channels,
-                                                 cbs = (self.tls_non_ca_authrized_cert, self.dhcp_verify,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                                 port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                 negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      def test_cord_subscriber_authentication_with_dhcp_discover_validating_channel_surfing(self):
-          ### """Test subscriber auth success, DHCP re-discover with DHCP server and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                   num_channels = num_channels,
-                                                   cbs = (self.tls_verify, self.dhcp_discover_scenario,
-                                                                   self.igmp_verify, self.traffic_verify),
-                                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_client_reboot_validating_channel_surfing(self):
-          ### """Test subscriber auth success, DHCP client got re-booted and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_reboot_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_server_reboot_validating_channel_surfing(self):
-          ### """Test subscriber auth , DHCP server re-boot during DHCP process and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                              num_channels = num_channels,
-                                              cbs = (self.tls_verify, self.dhcp_server_reboot_scenario,
-                                                                   self.igmp_verify, self.traffic_verify),
-                                              port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_client_rebind_validating_channel_surfing(self):
-          ### """Test subscriber auth , DHCP client rebind IP and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_rebind_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_cord_subscriber_authentication_with_dhcp_starvation_validating_channel_surfing(self):
-          ### """Test subscriber auth , DHCP starvation and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_starvation_scenario,
-                                                                  self.igmp_verify, self.traffic_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_multiple_dhcp_discover_for_same_subscriber_validating_channel_surfing(self):
-          ### """Test subscriber auth , sending same DHCP client discover multiple times and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                   num_channels = num_channels,
-                                   cbs = (self.tls_verify, self.dhcp_same_client_multi_discovers_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_multiple_dhcp_request_for_same_subscriber_validating_channel_surfing(self):
-          ### """Test subscriber auth , same DHCP client multiple requerts times and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                     num_channels = num_channels,
-                                     cbs = (self.tls_verify, self.dhcp_same_client_multi_request_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                     port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_client_requested_ip_validating_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client requesting ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                     num_channels = num_channels,
-                                     cbs = (self.tls_verify, self.dhcp_client_desired_ip_scenario,
-                                                              self.igmp_verify, self.traffic_verify),
-                                     port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_non_offered_ip_validating_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client request for non-offered ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                        num_channels = num_channels,
-                        cbs = (self.tls_verify, self.dhcp_client_request_pkt_with_non_offered_ip_scenario,
-                                                                   self.igmp_verify, self.traffic_verify),
-                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_authentication_with_dhcp_request_out_of_pool_ip_by_client_validating_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client requesting out of pool ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_verify, self.dhcp_client_requested_out_pool_ip_scenario,
-                                                              self.igmp_verify, self.traffic_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_cord_subscriber_authentication_with_dhcp_specified_lease_time_functionality_validating_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client specifying lease time and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_verify, self.dhcp_client_specific_lease_scenario,
-                                            self.igmp_verify, self.traffic_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_1k_subscribers_authentication_with_valid_and_invalid_certificates_validating_channel_surfing(self):
-          ### """Test 1k subscribers to auth, half of the subscribers with invalidCertification and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_invalid_cert, self.dhcp_verify,
-                                                                   self.igmp_verify, self.traffic_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                  negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      @nottest
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_1k_subscribers_authentication_with_valid_and_no_certificates_validating_channel_surfing(self):
-          ### """Test 1k subscribers to auth, half of the subscribers with No Certification and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_1k_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_validating_channel_surfing(self):
-          ### """Test 1k subscribers to auth, half of the subscribers with Non CA authorized Certificate and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                 num_channels = num_channels,
-                                                 cbs = (self.tls_non_ca_authrized_cert, self.dhcp_verify, self.igmp_verify),
-                                                 port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                 negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_5k_subscribers_authentication_with_valid_and_invalid_certificates_validating_channel_surfing(self):
-          ### """Test 5k subscribers to auth, half of the subscribers with invalidCertification and join channel"""
-          num_subscribers = 5000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_invalid_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_5k_subscribers_authentication_with_valid_and_no_certificates_validating_channel_surfing(self):
-          ### """Test 5k subscribers to auth, half of the subscribers with No Certification and join channel"""
-          num_subscribers = 5000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_5k_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_validating_channel_surfing(self):
-          ### """Test 5k subscribers to auth, half of the subscribers with Non CA authorized Certificate and join channel"""
-          num_subscribers = 5000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                 num_channels = num_channels,
-                                                 cbs = (self.tls_non_ca_authrized_cert, self.dhcp_verify, self.igmp_verify),
-                                                 port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                 negative_subscriber_auth = 'half')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_10k_subscribers_authentication_with_valid_and_invalid_certificates_validating_channel_surfing(self):
-          ### """Test 10k subscribers to auth, half of the subscribers with invalidCertification and join channel"""
-          num_subscribers = 10000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_invalid_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'onethird')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_10k_subscribers_authentication_with_valid_and_no_certificates_validating_channel_surfing(self):
-          ### """Test 10k subscribers to auth, half of the subscribers with No Certification and join channel"""
-          num_subscribers = 10000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'onethird')
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      @nottest
-      def test_10k_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_validating_channel_surfing(self):
-          ### """Test 10k subscribers to auth, half of the subscribers with Non CA authorized Certificate and join channel"""
-          num_subscribers = 10000
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                 num_channels = num_channels,
-                                                 cbs = (self.tls_non_ca_authrized_cert, self.dhcp_verify, self.igmp_verify),
-                                                 port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                 negative_subscriber_auth = 'onethird')
-              assert_equal(test_status, False)
-              assert_equal(test_status, True)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_discovers_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth success, DHCP re-discover with DHCP server and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                   num_channels = num_channels,
-                                                   cbs = (self.tls_verify, self.dhcp_discover_scenario, self.igmp_verify),
-                                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_client_reboot_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth success, DHCP client got re-booted and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_reboot_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_server_reboot_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth , DHCP server re-boot during DHCP process and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                              num_channels = num_channels,
-                                              cbs = (self.tls_verify, self.dhcp_server_reboot_scenario, self.igmp_verify),
-                                              port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_client_rebind_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth , DHCP client rebind IP and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_rebind_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_starvation_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth , DHCP starvation and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_starvation_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_client_requested_ip_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth with DHCP client requesting ip and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                     num_channels = num_channels,
-                                     cbs = (self.tls_verify, self.dhcp_client_desired_ip_scenario, self.igmp_verify),
-                                     port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_non_offered_ip_validating_channel_surfing(self):
-          ### """Test subscribers auth with DHCP client request for non-offered ip and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                        num_channels = num_channels,
-                        cbs = (self.tls_verify, self.dhcp_client_request_pkt_with_non_offered_ip_scenario, self.igmp_verify),
-                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_4_cord_subscribers_join_recv_5channel(self):
-          ###"""Test 4 subscribers join and receive for 5 channels surfing"""
-          num_subscribers = 4
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_4_cord_subscribers_join_jump_5channel(self):
-          ###"""Test 4 subscribers jump and receive for 5 channels surfing"""
-          num_subscribers = 4
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_4_cord_subscribers_join_next_5channel(self):
-          ###"""Test 4 subscribers join next for 5 channels"""
-          num_subscribers = 4
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_10_cord_subscribers_join_recv_5channel(self):
-          ###"""Test 10 subscribers join and receive for 5 channels surfing"""
-          num_subscribers = 10
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_10_cord_subscribers_join_jump_5channel(self):
-          ###"""Test 10 subscribers jump and receive for 5 channels surfing"""
-          num_subscribers = 10
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_10_cord_subscribers_join_next_5channel(self):
-          ###"""Test 10 subscribers join next for 5 channels"""
-          num_subscribers = 10
-          num_channels = 5
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-
-      def test_cord_subscriber_join_and_recv_stream_surfing_100_channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify,
-                                                              self.igmp_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_recv_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify,
-                                                              self.igmp_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_recv_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify,
-                                                             self.igmp_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_recv_1200channels(self):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify,
-                                                                self.igmp_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_recv_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify,
-                                                                  self.igmp_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_jump_100channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                            self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_cord_subscriber_join_jump_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                              self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_jump_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                          self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_cord_subscriber_join_jump_1200channel(sself):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                           self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_cord_subscriber_join_jump_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify,
-                                                           self.igmp_jump_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_next_100channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                            self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_next_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_next_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                            self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_cord_subscriber_join_next_1200channels(self):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_cord_subscriber_join_next_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify,
-                                                           self.igmp_next_verify, self.traffic_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_authentication_with_dhcp_request_out_of_pool_ip_by_client_validating_channel_surfing(self):
-          ### """Test 1k subscribers auth with DHCP client requesting out of pool ip and join channel"""
-          num_subscribers = 1000
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_verify, self.dhcp_client_requested_out_pool_ip_scenario, self.igmp_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_recv_100channel(self):
-          ###"""Test 1k subscribers join and receive for 100 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_jump_100channel(self):
-          ###"""Test 1k subscribers jump and receive for 100 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_next_100channel(self):
-          ###"""Test 1k subscribers join next for 100 channels"""
-          num_subscribers = 1000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_recv_400channel(self):
-          ###"""Test 1k subscribers join and receive for 400 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_jump_400channel(self):
-          ###"""Test 1k subscribers jump and receive for 400 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_next_400channel(self):
-          ###"""Test 1k subscribers join next for 400 channels"""
-          num_subscribers = 1000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_recv_800channel(self):
-          ###"""Test 1k subscribers join and receive for 800 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_jump_800channel(self):
-          ###"""Test 1k subscribers jump and receive for 800 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_next_800channel(self):
-          ###"""Test 1k subscribers join next for 800 channels"""
-          num_subscribers = 1000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_recv_1200channel(self):
-          ###"""Test 1k subscribers join and receive for 1200 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_jump_1200channel(self):
-          ###"""Test 1k subscribers jump and receive for 1200 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_next_1200channel(self):
-          ###"""Test 1k subscribers join next for 1200 channels"""
-          num_subscribers = 1000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_recv_1500channel(self):
-          ###"""Test 1k subscribers join and receive for 1500 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_jump_1500channel(self):
-          ###"""Test 1k subscribers jump and receive for 1500 channels surfing"""
-          num_subscribers = 1000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_1k_cord_subscribers_join_next_1500channel(self):
-          ###"""Test 1k subscribers join next for 1500 channels"""
-          num_subscribers = 1000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_recv_100channel(self):
-          ###"""Test 5k subscribers join and receive for 100 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_jump_100channel(self):
-          ###"""Test 5k subscribers jump and receive for 100 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_next_100channel(self):
-          ###"""Test 5k subscribers join next for 100 channels"""
-          num_subscribers = 5000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_recv_400channel(self):
-          ###"""Test 5k subscribers join and receive for 400 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_jump_400channel(self):
-          ###"""Test 5k subscribers jump and receive for 400 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_next_400channel(self):
-          ###"""Test 5k subscribers join next for 400 channels"""
-          num_subscribers = 5000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_recv_800channel(self):
-          ###"""Test 5k subscribers join and receive for 800 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_jump_800channel(self):
-          ###"""Test 5k subscribers jump and receive for 800 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_next_800channel(self):
-          ###"""Test 5k subscribers join next for 800 channels"""
-          num_subscribers = 5000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_recv_1200channel(self):
-          ###"""Test 5k subscribers join and receive for 1200 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_jump_1200channel(self):
-          ###"""Test 5k subscribers jump and receive for 1200 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_next_1200channel(self):
-          ###"""Test 5k subscribers join next for 1200 channels"""
-          num_subscribers = 5000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_recv_1500channel(self):
-          ###"""Test 5k subscribers join and receive for 1500 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_jump_1500channel(self):
-          ###"""Test 5k subscribers jump and receive for 1500 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_5k_cord_subscribers_join_next_1500channel(self):
-          ###"""Test 5k subscribers join next for 1500 channels"""
-          num_subscribers = 5000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_recv_100channel(self):
-          ###"""Test 10k subscribers join and receive for 100 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_jump_100channel(self):
-          ###"""Test 10k subscribers jump and receive for 100 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_next_100channel(self):
-          ###"""Test 10k subscribers join next for 100 channels"""
-          num_subscribers = 10000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_recv_100channel(self):
-          ###"""Test 100k subscribers join and receive for 100 channels surfing"""
-          num_subscribers = 100000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_jump_100channel(self):
-          ###"""Test 100k subscribers jump and receive for 100 channels surfing"""
-          num_subscribers = 100000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_next_100channel(self):
-          ###"""Test 100k subscribers join next for 100 channels"""
-          num_subscribers = 100000
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_recv_400channel(self):
-          ###"""Test 10k subscribers join and receive for 400 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_jump_400channel(self):
-          ###"""Test 10k subscribers jump and receive for 400 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_next_400channel(self):
-          ###"""Test 10k subscribers join next for 400 channels"""
-          num_subscribers = 10000
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_recv_800channel(self):
-          ###"""Test 10k subscribers join and receive for 800 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_jump_800channel(self):
-          ###"""Test 10k subscribers jump and receive for 800 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_next_800channel(self):
-          ###"""Test 10k subscribers join next for 800 channels"""
-          num_subscribers = 10000
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_recv_1200channel(self):
-          ###"""Test 10k subscribers join and receive for 1200 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_jump_1200channel(self):
-          ###"""Test 10k subscribers jump and receive for 1200 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_next_1200channel(self):
-          ###"""Test 10k subscribers join next for 1200 channels"""
-          num_subscribers = 10000
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_recv_1500channel(self):
-          ###"""Test 10k subscribers join and receive for 1500 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_jump_1500channel(self):
-          ###"""Test 10k subscribers jump and receive for 1500 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_10k_cord_subscribers_join_next_1500channel(self):
-          ###"""Test 10k subscribers join next for 1500 channels"""
-          num_subscribers = 10000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_recv_1500channel(self):
-          ###"""Test 100k subscribers join and receive for 1500 channels surfing"""
-          num_subscribers = 100000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_jump_1500channel(self):
-          ###"""Test 10k subscribers jump and receive for 1500 channels surfing"""
-          num_subscribers = 100000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      @nottest
-      def test_100k_cord_subscribers_join_next_1500channel(self):
-          ###"""Test 10k subscribers join next for 1500 channels"""
-          num_subscribers = 100000
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-
-      def remove_olt(self, switch_map):
-          controller = get_controller()
-          auth = ('karaf', 'karaf')
-          #remove subscriber for every port on all the voltha devices
-          for device, device_map in switch_map.iteritems():
-              uni_ports = device_map['ports']
-              uplink_vlan = device_map['uplink_vlan']
-              for port in uni_ports:
-                  rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}'.format(controller,
-                                                                           device,
-                                                                           port)
-                  resp = requests.delete(rest_url, auth = auth)
-                  if resp.status_code not in [204, 202, 200]:
-                        log_test.error('Error deleting subscriber for device %s on port %s' %(device, port))
-                  else:
-                        log_test.info('Deleted subscriber for device %s on port  %s' %(device, port))
-
-      def config_olt(self, switch_map):
-          controller = get_controller()
-          auth = ('karaf', 'karaf')
-          #configure subscriber for every port on all the voltha devices
-          for device, device_map in switch_map.iteritems():
-              uni_ports = device_map['ports']
-              uplink_vlan = device_map['uplink_vlan']
-              for port in uni_ports:
-                  vlan = port
-                  rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}/{}'.format(controller,
-                                                                              device,
-                                                                              port,
-                                                                              vlan)
-                  requests.post(rest_url, auth = auth)
-                  #assert_equal(resp.ok, True)
-
-      def cord_subscriber_voltha(self, services, cbs = None, num_subscribers = 1, num_channels = 1):
-          """Test subscriber join next for channel surfing"""
-          if self.VOLTHA_HOST is None:
-                log_test.info('Skipping test as no voltha host')
-                return
-          switch_map = None
-          olt_configured = False
-          try:
-                switch_map = self.voltha_switch_map
-                if not switch_map:
-                      log_test.info('No voltha devices found')
-                      return
-                log_test.info('Adding subscribers through OLT app')
-                self.config_olt(switch_map)
-                olt_configured = True
-                time.sleep(5)
-                self.num_subscribers = num_subscribers
-                self.num_channels = num_channels
-                test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                          num_channels = self.num_channels,
-                                                          cbs = cbs,
-                                                          port_list = self.generate_port_list(self.num_subscribers,
-                                                                                              self.num_channels),
-                                                          services = services)
-                assert_equal(test_status, True)
-          finally:
-                if switch_map is not None:
-                      if olt_configured is True:
-                            self.remove_olt(switch_map)
-
-      def test_subscriber_for_voltha_and_aaa_app_with_tls(self):
-          """Test subscriber TLS authentication with voltha"""
-          if self.VOLTHA_HOST is None:
-                log_test.info('Skipping test as no voltha host')
-                return
-          num_subscribers = 1
-          num_channels = 1
-          services = ('TLS',)
-          cbs = ( self.tls_verify, )
-          self.cord_subscriber_voltha(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-
-      def test_subscriber_for_voltha_and_aaa_app_with_tls_and_igmp(self):
-          """Test subscriber TLS and IGMP with voltha with 1 channel"""
-          if self.VOLTHA_HOST is None:
-                log_test.info('Skipping test as no voltha host')
-                return
-          num_subscribers = 1
-          num_channels = 1
-          services = ('TLS','IGMP',)
-          cbs = ( self.tls_verify, self.voltha_igmp_next_verify,)
-          self.cord_subscriber_voltha(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-
-      def test_subscriber_for_voltha_and_aaa_app_with_tls_and_igmp_with_three_subscribers_and_three_channels(self):
-          """Test subscriber TLS and IGMP with voltha for channel surfing with 3 subscribers browsing 3 channels each"""
-          if self.VOLTHA_HOST is None:
-                log_test.info('Skipping test as no voltha host')
-                return
-          num_subscribers = 3
-          num_channels = 3
-          services = ('TLS','IGMP',)
-          cbs = ( self.tls_verify, self.voltha_igmp_next_verify,)
-          self.cord_subscriber_voltha(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-
-      def test_subscribers_to_join_channels_and_recv_traffic_using_igmp_proxy_app(self):
-          """Test subscriber join and receive for channel surfing"""
-          self.num_subscribers = 5
-          self.num_channels = 1
-          self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = self.num_subscribers,
-                                                    num_channels = self.num_channels,
-                                                    port_list = self.generate_port_list(self.num_subscribers,
-                                                                                        self.num_channels))
-              assert_equal(test_status, True)
-	      self.igmp_proxy_teardown()
-	  except:
-	      log_test.info('got some error')
-	      self.igmp_proxy_teardown()
-	      raise
-
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_two_subscribers_authentication_with_valid_and_invalid_certificates_validating_channel_surfing_using_igmp_proxy_app(self):
-          ### """Test 2 subscribers to auth, one of the subscriber with invalidCertification and join channel"""
-          num_subscribers = 2
-          num_channels = 1
-	  self.igmpproxy_setup()
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-	      try:
-                  test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_invalid_cert, self.dhcp_verify,self.igmp_verify, self.traffic_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                                             negative_subscriber_auth = 'half')
-                  assert_equal(test_status, True)
-                  self.igmp_proxy_teardown()
-              except:
-                  log_test.info('got some error')
-                  self.igmp_proxy_teardown()
-                  raise
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      def test_subscriber_authentication_with_multiple_dhcp_discover_for_same_subscriber_and_validating_channel_surfing_using_igmp_proxy_app(self):
-          ### """Test subscriber auth , sending same DHCP client discover multiple times and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                   num_channels = num_channels,
-                                   cbs = (self.tls_verify, self.dhcp_same_client_multi_discovers_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative                                                         negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      @deferred(SUBSCRIBER_TIMEOUT)
-      def test_subscriber_authentication_with_dhcp_client_reboot_also_validating_channel_surfing_using_igmp_proxy_app(self):
-          ### """Test subscriber auth success, DHCP client got re-booted and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-	  self.igmpproxy_setup()
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-	      try:
-                  test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_reboot_scenario,
-                                                                     self.igmp_verify, self.traffic_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                             negative_subscriber_auth = 'all')
-                  assert_equal(test_status, True)
-                  self.igmp_proxy_teardown()
-              except:
-                  log_test.info('got some error')
-                  self.igmp_proxy_teardown()
-                  raise
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-      def test_four_subscribers_to_join_and_jump_five_channel_using_igmp_proxy_app(self):
-          ###"""Test 4 subscribers jump and receive for 5 channels surfing"""
-          num_subscribers = 4
-          num_channels = 5
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      def test_four_subscribers_to_join_next_five_channel_using_igmp_proxy_app(self):
-          ###"""Test 4 subscribers join next for 5 channels"""
-          num_subscribers = 4
-          num_channels = 5
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      @nottest
-      def test_1000_subscribers_to_join_next_1500channels_using_igmp_proxy_app(self):
-          ###"""Test 1k subscribers join next for 1500 channels"""
-          num_subscribers = 1000
-          num_channels = 1500
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      @nottest
-      def test_5000_subscribers_to_join_and_jump_800_channels_using_igmp_proxy_app(self):
-          ###"""Test 5k subscribers jump and receive for 800 channels surfing"""
-          num_subscribers = 5000
-          num_channels = 800
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      @nottest
-      def test_10000_subscribers_join_1200_channels_and_recv_traffic_using_igmp_proxy_app(self):
-          ###"""Test 10k subscribers join and receive for 1200 channels surfing"""
-          num_subscribers = 10000
-          num_channels = 1200
-	  self.igmpproxy_setup()
-	  try:
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-              assert_equal(test_status, True)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
-      @nottest
-      def test_three_subscribers_with_voltha_tls_and_igmp_proxy(self):
-          """Test subscriber TLS and IGMP with voltha for channel surfing with 3 subscribers browsing 3 channels each"""
-          if self.VOLTHA_HOST is None:
-                log_test.info('Skipping test as no voltha host')
-                return
-          num_subscribers = 3
-          num_channels = 3
-          services = ('TLS','IGMP',)
-          cbs = ( self.tls_verify, self.voltha_igmp_next_verify,)
-	  try:
-              self.cord_subscriber_voltha(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-              self.igmp_proxy_teardown()
-          except:
-              log_test.info('got some error')
-              self.igmp_proxy_teardown()
-              raise
-
diff --git a/src/test/cordvtn/__init__.py b/src/test/cordvtn/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/cordvtn/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/cordvtn/cordvtnTest.py b/src/test/cordvtn/cordvtnTest.py
deleted file mode 100644
index ed70674..0000000
--- a/src/test/cordvtn/cordvtnTest.py
+++ /dev/null
@@ -1,4641 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import os,sys
-import keystoneclient.v2_0.client as ksclient
-import keystoneclient.apiclient.exceptions
-import neutronclient.v2_0.client as nclient
-import neutronclient.common.exceptions
-import novaclient.v1_1.client as novaclient
-from novaclient import client as nova_client
-from multiprocessing import Pool
-from neutronclient.v2_0 import client as neutron_client
-import neutronclient.v2_0.client as neutronclient
-from nose.tools import assert_equal
-from CordTestUtils import get_mac, log_test
-from onosclidriver import OnosCliDriver
-from OnosCtrl import OnosCtrl
-from CordLogger import CordLogger
-from TestManifest import TestManifest
-from OnosFlowCtrl import OnosFlowCtrl
-from scapy.all import *
-from credentials import *
-from VSGAccess import VSGAccess
-from SSHTestAgent import SSHTestAgent
-import requests
-import time
-import py_compile
-import json
-
-PROTO_NAME_TCP = 'tcp'
-PROTO_NAME_ICMP = 'icmp'
-IPv4 = 'IPv4'
-
-OS_USERNAME = 'admin'
-OS_PASSWORD = 'VeryLongKeystoneAdminPassword'
-OS_TENANT = 'admin'
-OS_AUTH_URL = 'https://keystone.cord.lab:5000/v2.0'
-OS_SERVICE_ENDPOINT = 'https://keystone.cord.lab:5000/v2.0/'
-VM_BOOT_TIMEOUT = 100
-VM_DELETE_TIMEOUT = 100
-
-
-#VM SSH CREDENTIALS
-VM_USERNAME = 'ubuntu'
-VM_PASSWORD = 'ubuntu'
-
-TENANT_PREFIX = 'test-'
-VM_PREFIX = 'test-'
-NETWORK_PREFIX = 'test-'
-CIDR_PREFIX = '192.168'
-
-class vtn_validation_utils:
-
-    endpoint = '172.17.0.5'
-    version = ''
-    vtn_app = 'org.opencord.vtn'
-
-    def __init__(self, version):
-        self.version = version
-        self.manifest = None
-        self.vtn_enabled = False
-        manifest = os.getenv('MANIFEST', None)
-        if manifest:
-            self.manifest = TestManifest(manifest = manifest)
-            self.endpoint = self.manifest.onos_ip
-            self.vtn_enabled = self.manifest.synchronizer == 'vtn'
-
-        self.app_ctrl = OnosCtrl(self.vtn_app, controller = self.endpoint)
-
-    def getDevices(self):
-        return OnosCtrl.get_devices(controller = self.endpoint)
-
-    def getLinks(self):
-        return OnosCtrl.get_links(controller = self.endpoint)
-
-    def getDevicePorts(self, switch_id):
-        return OnosCtrl.get_ports_device(switch_id, controller = self.endpoint)
-
-    def activateVTNApp(self):
-        return self.app_ctrl.activate()
-
-    def deactivateVTNApp(self):
-        return self.app_ctrl.deactivate()
-
-class cordvtn_exchange(CordLogger):
-
-    app_cordvtn = 'org.opencord.vtn'
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    cordvtn_dir = os.path.join(test_path, '..', 'setup')
-    cordvtn_conf_file = os.path.join(test_path, '..', '../cordvtn/network_cfg.json')
-    head_node_user = 'vagrant'
-    head_node_pass = 'vagrant'
-    head_node = os.getenv('HEAD_NODE', 'prod')
-    head_node_ip = '10.1.0.1'
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the cordvtn app'''
-        time.sleep(3)
-        cls.onos_ctrl = OnosCtrl(cls.app_cordvtn)
-        status, _ = cls.onos_ctrl.activate()
-        #assert_equal(status, True)
-        time.sleep(3)
-        cls.cordvtn_setup()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the cord vtn app'''
-        #cls.onos_ctrl.deactivate()
-        #cls.cord_vtn_cleanup()
-
-    @classmethod
-    def cordvtn_setup(cls):
-        pass
-
-    @classmethod
-    def cord_vtn_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        pass
-
-    @classmethod
-    def onos_load_config(cls, cordvtn_conf_file):
-        status, code = OnosCtrl.config(cordvtn_conf_file)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    def get_neutron_credentials(self):
-        n = {}
-        n['username'] = os.environ['OS_USERNAME']
-        n['password'] = os.environ['OS_PASSWORD']
-        n['auth_url'] = os.environ['OS_AUTH_URL']
-        n['tenant_name'] = os.environ['OS_TENANT_NAME']
-        n['ca_cert'] = os.environ['REQUESTS_CA_BUNDLE']
-        return n
-
-    @classmethod
-    def get_compute_nodes(cls):
-        credentials = get_nova_credentials_v2()
-        novaclient = nova_client.Client('2', **credentials)
-        print novaclient.hypervisors.list()
-        return novaclient.hypervisors.list()
-
-    def create_network(i):
-        neutron_credentials = get_neutron_credentials()
-        neutron = neutron_client.Client(**neutron_credentials)
-        json = {'network': {'name': 'network-' + str(i),
-                            'admin_state_up': True}}
-        while True:
-           try:
-              net = neutron.create_network(body=json)
-              print '\nnetwork-' + str(i) + ' created'
-              return net
-           except Exception as e:
-              print e
-              continue
-
-    def create_tenant(tenant_name):
-        new_tenant = keystone.tenants.create(tenant_name=tenant_name,
-                     description="CORD Tenant \
-                     created",
-                     enabled=True)
-        tenant_id = new_tenant.id
-        tenant_status = True
-        user_data = []
-        for j in range(2):
-            j += 1
-            user_name = tenant_name + '-user-' + str(j)
-            user_data.append(create_user(user_name, tenant_id))
-
-        print " Tenant and User Created"
-
-        tenant_data = {'tenant_name': tenant_name,
-                       'tenant_id': tenant_id,
-                       'status': tenant_status}
-        return tenant_data
-
-    def create_user(user_name, tenant_id):
-        new_user = keystone.users.create(name=user_name,
-                                         password="ubuntu",
-                                         tenant_id=tenant_id)
-        print('   - Created User %s' % user_name)
-        keystone.roles.add_user_role(new_user, member_role, tenant_id)
-        if assign_admin:
-           admin_user = keystone.users.find(name='admin')
-           admin_role = keystone.roles.find(name='admin')
-           keystone.roles.add_user_role(admin_user, admin_role, tenant_id)
-        user_data = {'name': new_user.name,
-                     'id': new_user.id}
-        return user_data
-
-    def create_port( router_id, network_id):
-        credentials = get_credentials()
-        neutron = client.Client(**credentials)
-        router = neutron.show_router(router_id)
-
-        value = {'port':{
-        'admin_state_up':True,
-        'device_id': router_id,
-        'name': 'port1',
-        'network_id':network_id,
-        }}
-        response = neutron.create_port(body=value)
-
-    def router_create(self, name):
-        external_network = None
-        for network in self.neutron.list_networks()["networks"]:
-            if network.get("router:external"):
-                external_network = network
-                break
-
-        if not external_network:
-            raise Exception("Alarm! Can not to find external network")
-
-        gw_info = {
-            "network_id": external_network["id"],
-            "enable_snat": True
-        }
-        router_info = {
-            "router": {
-                "name": name,
-                "external_gateway_info": gw_info,
-                "tenant_id": self.tenant_id
-            }
-        }
-        router = self.neutron.router_create(router_info)['router']
-        return router
-
-    def delete_tenant(tenant_name):
-        tenant = keystone.tenants.find(name=tenant_name)
-        for j in range(2):
-            j += 1
-            user_name = tenant_name + '-user-' + str(j)
-            delete_user(user_name, tenant.id)
-        tenant.delete()
-        print('   - Deleted Tenant %s ' % tenant_name)
-        return True
-
-    def delete_user(user_name, tenant_id):
-        user = keystone.users.find(name=user_name)
-        user.delete()
-
-        print('   - Deleted User %s' % user_name)
-        return True
-
-    def set_environment(tenants_num=0, networks_per_tenant=1, vms_per_network=2):
-        octet = 115
-        vm_inc = 11
-        image = nova_connection.images.get(IMAGE_ID)
-        flavor = nova_connection.flavors.get(FLAVOR_ID)
-
-        admin_user_id = keystone_connection.users.find(name=OS_USERNAME).id
-        member_role_id = keystone_connection.roles.find(name='Member').id
-        for num_tenant in range(1, tenants_num+1):
-            tenant = keystone_connection.tenants.create('%stenant%s' % (TENANT_PREFIX, num_tenant))
-            keystone_connection.roles.add_user_role(admin_user_id, member_role_id, tenant=tenant.id)
-            for num_network in range(networks_per_tenant):
-                network_json = {'name': '%snet%s' % (NETWORK_PREFIX, num_tenant*10+num_network),
-                                'admin_state_up': True,
-                                'tenant_id': tenant.id}
-                network = neutron_connection.create_network({'network': network_json})
-                subnet_json = {'name': '%ssubnet%s' % (NETWORK_PREFIX, num_tenant*10+num_network),
-                               'network_id': network['network']['id'],
-                               'tenant_id': tenant.id,
-                               'enable_dhcp': True,
-                               'cidr': '%s.%s.0/24' % (CIDR_PREFIX, octet), 'ip_version': 4}
-                octet += 1
-                subnet = neutron_connection.create_subnet({'subnet': subnet_json})
-                router_json = {'name': '%srouter%s' % (NETWORK_PREFIX, num_tenant*10+num_network),
-                               'tenant_id': tenant.id}
-                router = neutron_connection.router_create({'router': router_json})
-                port = neutron_connection.add_interface_router(router['router']['id'], {'subnet_id': subnet['subnet']['id']})
-                for num_vm in range(vms_per_network):
-                    tenant_nova_connection = novacli.Client(OS_USERNAME, OS_PASSWORD, tenant.name, OS_AUTH_URL)
-                    m = tenant_nova_connection.servers.create('%svm%s' % (VM_PREFIX, vm_inc), image, flavor, nics=[{'net-id': network['network']['id']}, {'net-id': MGMT_NET}])
-                    vm_inc += 1
-
-    def verify_neutron_crud():
-        x = os.system("neutron_test.sh")
-        return x
-
-    def list_floatingips( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_floatingips(**kwargs)['floatingips']
-
-    def list_security_groups( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_security_groups(**kwargs)['security_groups']
-
-    def list_subnets( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_subnets(**kwargs)['subnets']
-
-    def list_networks( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_networks(**kwargs)['networks']
-
-    def list_ports( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_ports(**kwargs)['ports']
-
-    def list_routers( **kwargs):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        return neutron.list_routers(**kwargs)['routers']
-
-    def update_floatingip( fip, port_id=None):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.update_floatingip(fip, {"floatingip":
-                                              {"port_id": port_id}})
-
-    def update_subnet( subnet_id, **subnet_params):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.update_subnet(subnet_id, {'subnet': subnet_params})
-
-    def update_router( router_id, **router_params):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.update_router(router_id, {'router': router_params})
-
-    def router_gateway_set( router_id, external_gateway):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.update_router(
-        router_id, {'router': {'external_gateway_info':
-                               {'network_id': external_gateway}}})
-
-    def router_gateway_clear( router_id):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.update_router(
-        router_id, {'router': {'external_gateway_info': None}})
-
-    def router_add_interface( router_id, subnet_id):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.add_interface_router(router_id, {'subnet_id': subnet_id})
-
-    def router_rem_interface( router_id, subnet_id):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        neutron.remove_interface_router(
-        router_id, {'subnet_id': subnet_id})
-
-    def create_floatingip( **floatingip_params):
-        creds = get_neutron_credentials()
-        neutron = client.Client(**creds)
-        response = neutron.create_floatingip(
-        {'floatingip': floatingip_params})
-        if 'floatingip' in response and 'id' in response['floatingip']:
-           return response['floatingip']['id']
-
-    def make_iperf_pair(server, client, **kwargs):
-        ssh = SSHClient()
-        ssh.set_missing_host_key_policy(MissingHostKeyPolicy())
-
-        ssh.connect(server, username=VM_USERNAME, password=VM_PASSWORD)
-        ssh.exec_command('/usr/local/bin/iperf3 -s -D')
-
-        ssh.connect(client, username=VM_USERNAME, password=VM_PASSWORD)
-        stdin, stdout, stderr = ssh.exec_command('/usr/local/bin/iperf3 -c %s -J' % server)
-
-        rawdata = stdout.read()
-        data = json.loads(rawdata.translate(None,'\t').translate(None,'\n'))
-
-        return data
-
-    def connect_ssh(os_ip, private_key_file=None, user='ubuntu'):
-        key = ssh.RSAKey.from_private_key_file(private_key_file)
-        client = ssh.SSHClient()
-        client.set_missing_host_key_policy(ssh.WarningPolicy())
-        client.connect(ip, username=user, pkey=key, timeout=5)
-        return client
-
-    def validate_vtn_flows(switch):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        device_id = 'of:{}'.format(get_mac(switch))
-        ctlr = self.ctlr_ip.split(',')[0]
-        flow = OnosFlowCtrl(deviceId = device_id,
-                            egressPort = egress,
-                            ingressPort = ingress,
-                            ethType = '0x800',
-                            ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
-                            ipDst = ('IPV4_DST', egress_map['ip']+'/32'),
-                            controller = ctlr
-                            )
-        flow_id = flow.findFlow(device_id, IN_PORT = ('port', ingress),
-                                ETH_TYPE = ('ethType','0x800'), IPV4_SRC = ('ip', ingress_map['ip']+'/32'),
-                                IPV4_DST = ('ip', egress_map['ip']+'/32'))
-        if flow_id:
-           return True
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-
-    def cordvtn_config_load(self, config = None):
-        if config:
-           for k in config.keys():
-               if cordvtn_config.has_key(k):
-                  cordvtn_config[k] = config[k]
-        self.onos_load_config(self.cordvtn_dict)
-
-    def search_value(self, d, pat):
-        match = False
-        for k, v in d.items():
-            if isinstance(v, dict):
-               match = self.search_value(v, pat)
-               if match is True:
-                  break
-            elif type(v) is list:
-                 for i in range(len(v)):
-                    if type(v[i]) is dict:
-                       match = self.search_value(v[i], pat)
-                       if match is True:
-                          break
-                    else:
-                       if v[i] == pat:
-                          match = True
-                          return match
-            elif v == pat:
-                 match = True
-                 return match
-        if match is True:
-           print"Network search is successful"
-        return match
-
-    def get_key_value(self, d, key = None, value = None,):
-        match = False
-        ret_k = ""
-        ret_v = ""
-        if type(d) is not dict:
-           if type(d) is not list:
-              match = 'NOT_FOUND'
-              return [match, ret_k, ret_v]
-           else:
-              for i in range(len(d)):
-                  if type(d[i]) is dict:
-                     match,ret_k,ret_v = self.get_key_value(d[i], key, value)
-                     if match is True:
-                        print "Network creation is successful"
-                        break
-        else:
-           for k, v in d.items():
-              if isinstance(v, dict):
-                 match,ret_k,ret_v = self.get_key_value(v, key, value)
-                 if match is True:
-                    break
-              elif type(v) is list:
-                   for i in range(len(v)):
-                      if type(v[i]) is dict:
-                         match,ret_k,ret_v = self.get_key_value(v[i], key, value)
-                         if match is True:
-                            break
-                      else:
-                         if key:
-                            if k == key:
-                               match = True
-                               return [match, key, v]
-                         elif value:
-                              if v == value:
-                                 match = True
-                                 return [match, k, value]
-              else:
-                  if key:
-                     if k == key:
-                        match = True
-                        return [match, key, v]
-                     elif value:
-                        if v == value:
-                           match = True
-                           return [match, k, value]
-        if match == False:
-           match = 'NOT_FOUND'
-        return [match, ret_k, ret_v]
-
-    def neutron_network_creation_and_validation(self, net_name):
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": net_name,"admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        networks = neutron.list_networks(name=net_name)
-        data = networks
-        return self.search_value(data, net_name)
-
-    def neutron_network_deletion(self, net_name):
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        networks = neutron.list_networks(name=net_name)
-        net_id = self.get_key_value(d=networks, key = 'id')
-        net = neutron.delete_network(net_id[2])
-        return self.get_key_value(d=networks, value = net_name)
-
-    def temp_neutron_subnet_creation_and_validation_v1(self,net_name,sub_cird, sub_net_start = "172.27.0.2", sub_net_end = "172.27.0.200"):
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        networks = neutron.list_networks(name=net_name)
-        net_id = self.get_key_value(d=networks, key = 'id')
-        cidr = sub_cird
-        body_subnet_example = {"subnet":{"network_id": net_id[2],"ip_version":4, "cidr":str(cidr),  "allocation_pools": [{"start": "172.27.0.20", "end": "172.27.0.21"}]}}
-        neutron_sub = neutron.create_subnet(body_subnet_example)
-        networks = neutron.list_networks(name=net_name)
-        return self.get_key_value(d=networks, key = 'subnets')
-
-    def neutron_subnet_creation_and_validation(self,net_name,sub_cird):
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        networks = neutron.list_networks(name=net_name)
-        net_id = self.get_key_value(d=networks, key = 'id')
-        if sub_cird[0] == 'management':
-           cidr = sub_cird[1]
-           body_subnet_example = {"subnet":{"network_id": net_id[2],"ip_version":4, "cidr":str(cidr),  "allocation_pools": [{"start": sub_cird[2], "end": sub_cird[3]}]}}
-        elif sub_cird[0] == 'public':
-           cidr = sub_cird[1]
-           gate_way = sub_cird[2]
-           body_subnet_example = {"subnet":{"network_id": net_id[2],"ip_version":4, "cidr":str(cidr), "gateway_ip":str(gate_way)}}
-        elif sub_cird[0] == 'private':
-           cidr = sub_cird[1]
-           gate_way = sub_cird[2]
-           body_subnet_example = {"subnet":{"network_id": net_id[2],"ip_version":4, "cidr":str(cidr), "gateway_ip":str(gate_way)}}
-
-        neutron_sub = neutron.create_subnet(body_subnet_example)
-        networks = neutron.list_networks(name=net_name)
-        return self.get_key_value(d=networks, key = 'subnets')
-
-    def sub_network_type_post_to_onos(self,net_name,sub_net_type):
-
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        networks = neutron.list_networks(name=net_name)
-        net_id = self.get_key_value(d=networks, key = 'id')
-        vtn_util = vtn_validation_utils('')
-
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-        network_data = {"ServiceNetwork":{"id": net_id[2],"type":sub_net_type, "providerNetworks":[]}}
-        json_network_type_data = json.dumps(network_data)
-        resp = requests.post(url=url, auth=auth, data =json_network_type_data)
-        return resp
-
-    def service_dependency_on_network_post_to_onos(self,service_network_name,provider_network_name,service_network_type ='private'):
-
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        service_network = neutron.list_networks(name=service_network_name)
-        service_net_id = self.get_key_value(d=service_network, key = 'id')
-        provider_network = neutron.list_networks(name=provider_network_name)
-        provider_net_id = self.get_key_value(d=provider_network, key = 'id')
-        vtn_util = vtn_validation_utils('')
-
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-        network_data = {"ServiceNetwork":{"id": service_net_id[2],"type":service_network_type, "providerNetworks":[{"bidirectional": 'true', "id": provider_net_id[2]}]}}
-        json_network_type_data = json.dumps(network_data)
-        resp = requests.post(url=url, auth=auth, data =json_network_type_data)
-        return resp
-
-    def nova_instance_creation_and_validation(self,net_name,nova_obj,instance_name,image_name, flavor_id):
-        print nova_obj.images.list()
-        image = nova_obj.images.find(name=image_name)
-        flavor = nova_obj.flavors.find(name=flavor_id)
-
-        nics_list = ""
-        if len(net_name) == 2:
-            network_1 = nova_obj.networks.find(label=net_name[0])
-            network_2 = nova_obj.networks.find(label=net_name[1])
-            print network_1.id
-            print network_2.id
-            nics_list = [{'net-id':network_1.id},{'net-id':network_2.id}]
-        else:
-            network_1 = nova_obj.networks.find(label=net_name)
-            print network_1.id
-            nics_list = [{'net-id':network_1.id}]
-
-        server = nova_obj.servers.create(name = instance_name,
-                                         image = image.id,
-                                         flavor = flavor.id,
-                                         nics = nics_list,
-                                         userdata = "#cloud-config \n password: ubuntu \n chpasswd: { expire: False }\n ssh_pwauth: True")
-                                 #        key_name = 'id_rsa')
-        server_details =  nova_obj.servers.find(id=server.id)
-        print('Server is launched and status is %s' %server_details.status)
-        if server_details.status == 'BUILD':
-           time.sleep(120)
-        server_details =  nova_obj.servers.find(id=server.id)
-        print('After delay server status is %s state'%server_details.status)
-        if server_details.status == 'ERROR':
-           print('Server status is still in %s state'%server_details.status)
-        server_boot_up_log = nova_obj.servers.get_console_output(server.id)
-        print 'Server boot Up console log \n%s'%server_boot_up_log
-        return server_details
-
-    def create_net_subnet_nova_instance(self,net_name, subnet_name, instance_vm_details,management_type):
-        result = self.neutron_network_creation_and_validation(net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(net_name,subnet_name)# sub_net_start = subnet_name[2], sub_net_end =subnet_name[3])
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(net_name, management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(net_name,nova,instance_vm_details[0],instance_vm_details[1],instance_vm_details[2])
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        return [nova, new_instance_details]
-
-    def nova_instance_tenants_access_check(self, target_tenants_details, compute_details = None, source_tenants_details = None , check_type = 'Ping_from_compute'):
-        source_tenant_ip = ''
-        target_tenant_ip = ''
-        cmd = ''
-        status = ''
-        output = ''
-
-        ## TO DO New instance is not getting subnet ip, hence checking only mysite-vsg1 vm from compute node
-        if compute_details is None:
-           compute_ip = '10.1.0.17'
-        else:
-           compute_ip = compute_details.ip
-
-        ## TO DO New instance is not getting subnet ip, hence checking only mysite-vsg1 vm from compute node
-        if target_tenants_details == {}:
-           target_tenants_details = '10.1.0.1'
-
-        ## TO DO New instance is not getting subnet ip, hence checking only mysite-vsg1 vm from compute node
-        if source_tenants_details is not None:
-           if source_tenants_details == {}:
-              source_tenants_details = '10.1.0.1'
-
-        if check_type == "Ping_from_compute":
-           cmd2 = "ping -c 3 {0}".format(target_tenants_details)
-           ssh_agent = SSHTestAgent(host = compute_ip)
-           status, output = ssh_agent.run_cmd(cmd2, timeout = 5)
-           print output
-
-        if source_tenants_details is not None:
-           if check_type == "Ping_from_source_tenant":
-              cmd = "ping -c 3 {0}".format(target_tenants_details)
-              ssh_cmd = 'ssh {} {}'.format(source_tenants_details, cmd)
-              print 'Executing ssh command on compute node %s'%ssh_cmd
-              ssh_agent = SSHTestAgent(host = compute_ip)
-              status, output = ssh_agent.run_cmd(ssh_cmd, timeout = 5)
-              print output
-
-        if check_type == "Ping_to_external":
-           cmd = "ping -c 3 google.com"
-           ssh_cmd = 'ssh {} {}'.format(target_tenants_details, cmd)
-           ssh_agent = SSHTestAgent(host = compute_ip)
-           status, output = ssh_agent.run_cmd(ssh_cmd, timeout = 5)
-           print output
-
-        if status == True and output:
-           print "Ping is successful"
-           output = output.strip()
-        elif status == False:
-           print "Ping is not successful"
-           output = None
-        return [status, output]
-
-
-    def nova_instance_deletion(self, nova_obj, server_details):
-        results_nova_instance_deletion=nova_obj.servers.delete(server_details.id)
-        if results_nova_instance_deletion == None:
-           print"Nova instance is deleted"
-        else:
-           print"Nova instance is not deleted"
-        return results_nova_instance_deletion
-
-    def test_cordvtn_neutron_network_creation_and_validation_on_head_node_with_neutron_service(self):
-        """
-        Test Method:
-        0. Create vtn_test_1_net.
-        1. Do GET Rest API and validate creation of network.
-        2. Validate network on neutron openstack.
-        """
-        result = self.neutron_network_creation_and_validation('vtn_test_1_net')
-        if result is True:
-           self.neutron_network_deletion('vtn_test_1_net')
-        assert_equal(result, True)
-
-    def test_cordvtn_neutron_network_creation_and_validation_on_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "vtn_test_2_net","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "vtn_test_2_net")
-        self.neutron_network_deletion('vtn_test_2_net')
-        assert_equal(result, True)
-
-    def test_cordvtn_with_neutron_network_deletion_recreation_and_validation_on_head_node_with_neutron_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        result = self.neutron_network_creation_and_validation('vtn_test_3_net')
-        if result is True:
-           self.neutron_network_deletion('vtn_test_3_net')
-        assert_equal(result, True)
-        result_again = self.neutron_network_creation_and_validation('vtn_test_3_net')
-        if result_again is True:
-           self.neutron_network_deletion('vtn_test_3_net')
-        assert_equal(result, True)
-
-    def test_cordvtn_with_neutron_network_deletion_recreation_and_validation_on_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "vtn_test_4_net","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "vtn_test_4_net")
-        assert_equal(result, True)
-        self.neutron_network_deletion('vtn_test_4_net')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "vtn_test_4_net")
-        assert_equal(result, False)
-        net = neutron.create_network(body=body_example)
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "vtn_test_4_net")
-        self.neutron_network_deletion('vtn_test_4_net')
-        assert_equal(result, True)
-
-    def test_cordvtn_with_neutron_management_network_creation_and_validation_on_head_node_with_neutron_service(self):
-        test_net_name = 'vtn_test_5_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        result = self.neutron_network_creation_and_validation('vtn_test_5_net_management')
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        if sub_result[0] is True:
-           self.neutron_network_deletion('vtn_test_5_net_management')
-        assert_equal(sub_result[0], True)
-
-    def test_cordvtn_with_neutron_management_network_creation_and_validation_on_onos(self):
-        self.neutron_network_creation_and_validation('vtn_test_6_net_management')
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        networks = neutron.list_networks(name='vtn_test_6_net_management')
-        net_id = self.get_key_value(d=networks, key = 'id')
-        cidr = "172.27.0.0/24"
-        body_subnet_example = {"subnet":{"network_id": net_id[2],"ip_version":4, "cidr":str(cidr),  "allocation_pools": [{"start": "172.27.0.20", "end": "172.27.0.21"}]}}
-        neutron_sub = neutron.create_subnet(body_subnet_example)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == 'vtn_test_6_net_management':
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    log_test.info('Sub network is not successful')
-                    self.neutron_network_deletion('vtn_test_6_net_management')
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == cidr:
-                    log_test.info('Sub network is successful')
-                    self.neutron_network_deletion('vtn_test_6_net_management')
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-    def test_cordvtn_neutron_management_network_creation_and_post_network_type_management_local_to_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Pushed the network type as management local to onos
-        5. Verified that onos is having under management network
-        """
-        test_net_name = 'vtn_test_7_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == test_net_name:
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    log_test.info('Sub network is not successful')
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == test_sub_net_cidr[1]:
-                    log_test.info('Sub network is successful')
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        print("Response from onos to change network service type as management local = %s" %net_type_post.text)
-        net_type_json = json.loads(net_type_post.text)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(net_type_json['message'], 'null already exists')
-
-    def test_cordvtn_with_management_network_creation_launching_nova_instance_and_validation_on_head_node_with_nova_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        """
-        test_net_name = 'vtn_test_8_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_8_nova_instance_management_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        self.neutron_network_deletion(test_net_name)
-        self.nova_instance_deletion(nova, new_instance_details)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-
-    def test_cordvtn_with_public_network_creation_and_validation_on_head_node_with_neutron_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork who ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        test_net_name = 'vtn_test_9_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        if sub_result[0] is True:
-           self.neutron_network_deletion(test_net_name)
-        assert_equal(sub_result[0], True)
-
-    def test_cordvtn_with_public_network_creation_and_validation_on_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whoes ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        """
-        test_net_name = 'vtn_test_10_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26", '10.6.1.193']
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == test_net_name:
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    print('Sub network is not successful')
-                    self.neutron_network_deletion(test_net_name)
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == test_sub_net_cidr[1]:
-                    print('Sub network is successful')
-                    self.neutron_network_deletion(test_net_name)
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-    def test_cordvtn_with_public_network_creation_and_post_network_type_as_public_to_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Pushed the network type as management local to onos
-        5. Verified that onos is having under management network
-        """
-        test_net_name = 'vtn_test_11_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26", '10.6.1.193']
-        test_management_type = "public"
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == test_net_name:
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    log_test.info('Sub network is not successful')
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == test_sub_net_cidr[1]:
-                    log_test.info('Sub network is successful')
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        print("Response from onos to change network service type as management local = %s" %net_type_post.text)
-        net_type_json = json.loads(net_type_post.text)
-
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(net_type_json['message'], 'null already exists')
-
-    def test_cordvtn_public_network_creation_with_launching_nova_instance_and_validation_on_head_node_with_nova_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under public network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under public network
-        5. Validate new nova instance is created on nova service
-        """
-        test_net_name = 'vtn_test_12_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        instance_vm_name = 'vtn_test_12_nova_instance_public_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        self.neutron_network_deletion(test_net_name)
-        self.nova_instance_deletion(nova, new_instance_details)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-
-    def test_cordvtn_with_private_network_creation_and_validation_on_head_node_with_neutron_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork who ip is under private network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        test_net_name = 'vtn_test_13_net_private'
-        test_sub_net_cidr = ["private","10.160.160.160/24",'10.160.160.1']
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        if sub_result[0] is True:
-           self.neutron_network_deletion(test_net_name)
-        assert_equal(sub_result[0], True)
-
-    def test_cordvtn_with_private_network_creation_and_validation_on_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whoes ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        """
-        test_net_name = 'vtn_test_14_net_private'
-        test_sub_net_cidr = ["private","10.160.160.160/24", '10.160.160.1']
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == test_net_name:
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    print('Sub network is not successful')
-                    self.neutron_network_deletion(test_net_name)
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == '10.160.160.0/24':
-                 #elif sub_net_id[2] == test_sub_net_cidr[1]:
-                    print('Sub network is successful')
-                    self.neutron_network_deletion(test_net_name)
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-    def test_cordvtn_with_private_network_creation_and_post_network_type_as_private_to_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Pushed the network type as management local to onos
-        5. Verified that onos is having under management network
-        """
-        test_net_name = 'vtn_test_15_net_private'
-        test_sub_net_cidr = ["private","192.168.160.160/24", '192.168.160.1']
-        test_management_type = "private"
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        for i in range(len(data['ServiceNetworks'])):
-              if data['ServiceNetworks'][i]['name'] == test_net_name:
-                 sub_net_id = self.get_key_value(d=data['ServiceNetworks'][i], key = 'subnet')
-                 if sub_net_id[2] == " ":
-                    log_test.info('Sub network is not successful')
-                    assert_equal(False, True)
-                    break
-                 elif sub_net_id[2] == "192.168.160.0/24":
-                    log_test.info('Sub network is successful')
-                    assert_equal(sub_net_id[0], True)
-                    break
-
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        print("Response from onos to change network service type as management local = %s" %net_type_post.text)
-        net_type_json = json.loads(net_type_post.text)
-
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(net_type_json['message'], 'null already exists')
-
-    def test_cordvtn_with_private_network_creation_launching_nova_instance_and_validating_on_head_node_with_nova_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under private network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under private network
-        5. Validate new nova instance is created on nova service
-        """
-        test_net_name = 'vtn_test_16_net_private'
-        test_sub_net_cidr = ["private","192.168.160.160/24", '192.168.160.1']
-        instance_vm_name = 'vtn_test_16_nova_instance_private_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        self.neutron_network_deletion(test_net_name)
-        self.nova_instance_deletion(nova, new_instance_details)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-
-    def test_cordvtn_management_network_instance_and_validate_connectivity_from_host_machine_or_compute_node(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_17_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_17_nova_instance_management_net'
-        #image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, True)
-
-    def test_cordvtn_for_management_network_instance_and_validate_connectivity_to_external_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_18_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_18_nova_instance_management_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'], check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_with_management_network_creating_two_instances_and_validate_connectivity_between_two(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create first nova instance under management network
-        5. Validate first nova instance is created on nova service
-        6. Create second nova instance under management network
-        7. Validate second nova instance is created on nova service
-        8. Now try to ping from one nova instance to other instance, should not success
-        """
-        test_net_name = 'vtn_test_19_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.2", "172.27.0.200"]
-        test_management_type = "management_local"
-        first_instance_vm_name = 'vtn_test_19_nova_1st_instance_management_net'
-        second_instance_vm_name = 'vtn_test_19_nova_2nd_instance_management_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        first_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,first_instance_vm_name,image_name,flavor_id)
-        second_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,second_instance_vm_name,image_name,flavor_id)
-        assert_equal(first_instance_details.status, 'ACTIVE')
-        assert_equal(second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print first_nova_instance_details.addresses
-        print second_nova_instance_details.addresses
-        address_1st_instance = first_nova_instance_details.addresses
-        address_2nd_instance = second_nova_instance_details.addresses
-        print 'Nova 1st instance management ip = %s'%(address_1st_instance[test_net_name][0]['addr'])
-        print 'Nova 2nd instance management ip = %s'%(address_2nd_instance[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_net_name][0]['addr'],source_tenants_details =address_2nd_instance[test_net_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, first_nova_instance_details)
-        self.nova_instance_deletion(nova, second_nova_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_two_management_network_instances_and_validate_connectivity_between_two_networks_via_management_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        """
-        test_netA_name = 'vtn_test_20_netA_management'
-        test_sub_netA_cidr = ["management","172.27.0.0/24","172.27.0.2", "172.27.0.200"]
-        netA_instance_vm_name = 'vtn_test_20_nova_netA_instance_management_net'
-        test_netB_name = 'vtn_test_20_netB_management'
-        test_sub_netB_cidr = ["management","172.28.0.0/24","172.28.0.2", "172.28.0.200"]
-        netB_instance_vm_name = 'vtn_test_20_nova_netB_instance_management_net'
-        test_management_type = "management_local"
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        netA_instance_vm_details = [netA_instance_vm_name, image_name, flavor_id]
-        netB_instance_vm_details = [netB_instance_vm_name, image_name, flavor_id]
-
-        nova_netA, nova_instance_details_netA = self.create_net_subnet_nova_instance(test_netA_name, test_sub_netA_cidr, netA_instance_vm_details, test_management_type)
-        nova_netB, nova_instance_details_netB = self.create_net_subnet_nova_instance(test_netB_name, test_sub_netB_cidr, netB_instance_vm_details, test_management_type)
-
-        assert_equal(nova_instance_details_netA.status, 'ACTIVE')
-        assert_equal(nova_instance_details_netB.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print nova_instance_details_netA.addresses
-        print nova_instance_details_netB.addresses
-        address_1st_instance = nova_instance_details_netA.addresses
-        address_2nd_instance = nova_instance_details_netB.addresses
-        print 'Nova 1st instance management ip = %s'%(address_1st_instance[test_netA_name][0]['addr'])
-        print 'Nova 2nd instance management ip = %s'%(address_2nd_instance[test_netB_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_netA_name][0]['addr'],source_tenants_details =address_2nd_instance[test_netB_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova_netA, nova_instance_details_netA)
-        self.nova_instance_deletion(nova_netB, nova_instance_details_netB)
-        time.sleep(5)
-        self.neutron_network_deletion(test_netA_name)
-        self.neutron_network_deletion(test_netB_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_public_network_instance_and_validate_connectivity_from_host_machine_or_compute_node(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under public network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under public network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is not getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_21_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        test_management_type = "public"
-        instance_vm_name = 'vtn_test_21_nova_instance_pulic_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance public ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_public_network_instance_and_validate_connectivity_to_external_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under public network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under public network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_22_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        test_management_type = "public"
-        instance_vm_name = 'vtn_test_22_nova_instance_public_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance public ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'], check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_public_network_with_two_instances_and_validate_connectivity_between_two(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under public network
-        3. Do GET Rest API and validate creation of network
-        4. Create first nova instance under public network
-        5. Validate first nova instance is created on nova service
-        6. Create second nova instance under public network
-        7. Validate second nova instance is created on nova service
-        8. Now try to ping from one nova instance to other instance, should not success
-        """
-        test_net_name = 'vtn_test_23_net_public'
-        test_sub_net_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        test_management_type = "public"
-        first_instance_vm_name = 'vtn_test_23_nova_1st_instance_public_net'
-        second_instance_vm_name = 'vtn_test_23_nova_2nd_instance_public_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        first_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,first_instance_vm_name,image_name,flavor_id)
-        second_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,second_instance_vm_name,image_name,flavor_id)
-        assert_equal(first_instance_details.status, 'ACTIVE')
-        assert_equal(second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print first_nova_instance_details.addresses
-        print second_nova_instance_details.addresses
-        address_1st_instance = first_nova_instance_details.addresses
-        address_2nd_instance = second_nova_instance_details.addresses
-        print 'Nova 1st instance public ip = %s'%(address_1st_instance[test_net_name][0]['addr'])
-        print 'Nova 2nd instance public ip = %s'%(address_2nd_instance[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_net_name][0]['addr'],source_tenants_details =address_2nd_instance[test_net_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, first_nova_instance_details)
-        self.nova_instance_deletion(nova, second_nova_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_two_public_network_instances_and_check_connectvity_between_two_networks_via_public_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under public network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under public network
-        5. Validate new nova instance is created on nova service
-        """
-        test_netA_name = 'vtn_test_24_netA_public'
-        test_sub_netA_cidr = ["public","10.6.1.192/26",'10.6.1.193']
-        netA_instance_vm_name = 'vtn_test_24_nova_netA_instance_public_net'
-        test_netB_name = 'vtn_test_24_netB_public'
-        test_sub_netB_cidr = ["public","10.6.2.192/26",'10.6.2.193']
-        netB_instance_vm_name = 'vtn_test_24_nova_netB_instance_public_net'
-        test_management_type = "public"
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        netA_instance_vm_details = [netA_instance_vm_name, image_name, flavor_id]
-        netB_instance_vm_details = [netB_instance_vm_name, image_name, flavor_id]
-
-        nova_netA, nova_instance_details_netA = self.create_net_subnet_nova_instance(test_netA_name, test_sub_netA_cidr, netA_instance_vm_details, test_management_type)
-        nova_netB, nova_instance_details_netB = self.create_net_subnet_nova_instance(test_netB_name, test_sub_netB_cidr, netB_instance_vm_details, test_management_type)
-
-        assert_equal(nova_instance_details_netA.status, 'ACTIVE')
-        assert_equal(nova_instance_details_netB.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print nova_instance_details_netA.addresses
-        print nova_instance_details_netB.addresses
-        address_1st_instance = nova_instance_details_netA.addresses
-        address_2nd_instance = nova_instance_details_netB.addresses
-        print 'Nova 1st instance public ip = %s'%(address_1st_instance[test_netA_name][0]['addr'])
-        print 'Nova 2nd instance public ip = %s'%(address_2nd_instance[test_netB_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_netA_name][0]['addr'],source_tenants_details =address_2nd_instance[test_netB_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova_netA, nova_instance_details_netA)
-        self.nova_instance_deletion(nova_netB, nova_instance_details_netB)
-        time.sleep(5)
-        self.neutron_network_deletion(test_netA_name)
-        self.neutron_network_deletion(test_netB_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_private_network_instance_and_validate_connectivity_from_host_machine_or_compute_node(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under private network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under private network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is not getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_25_net_private'
-        test_sub_net_cidr = ["private","10.160.160.192/26",'10.160.160.193']
-        test_management_type = "private"
-        instance_vm_name = 'vtn_test_25_nova_instance_private_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance private ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_private_network_instance_and_validate_connectivity_to_external_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under private network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under private network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_26_net_private'
-        test_sub_net_cidr = ["private","10.160.160.192/26",'10.160.160.193']
-        test_management_type = "private"
-        instance_vm_name = 'vtn_test_26_nova_instance_private_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance private ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'], check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_private_network_with_two_instances_and_check_connectvity_between_two_instances(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under private network
-        3. Do GET Rest API and validate creation of network
-        4. Create first nova instance under private network
-        5. Validate first nova instance is created on nova service
-        6. Create second nova instance under public network
-        7. Validate second nova instance is created on nova service
-        8. Now try to ping from one nova instance to other instance, should not success
-        """
-        test_net_name = 'vtn_test_27_net_private'
-        test_sub_net_cidr = ["private","10.160.160.192/26",'10.160.160.193']
-        test_management_type = "private"
-        first_instance_vm_name = 'vtn_test_27_nova_1st_instance_private_net'
-        second_instance_vm_name = 'vtn_test_27_nova_2nd_instance_private_net'
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        first_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,first_instance_vm_name,image_name,flavor_id)
-        second_nova_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,second_instance_vm_name,image_name,flavor_id)
-        assert_equal(first_instance_details.status, 'ACTIVE')
-        assert_equal(second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print first_nova_instance_details.addresses
-        print second_nova_instance_details.addresses
-        address_1st_instance = first_nova_instance_details.addresses
-        address_2nd_instance = second_nova_instance_details.addresses
-        print 'Nova 1st instance private ip = %s'%(address_1st_instance[test_net_name][0]['addr'])
-        print 'Nova 2nd instance private ip = %s'%(address_2nd_instance[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_net_name][0]['addr'],source_tenants_details =address_2nd_instance[test_net_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, first_nova_instance_details)
-        self.nova_instance_deletion(nova, second_nova_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_two_private_network_instances_and_validate_connectivity_between_two_networks_via_private_network(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under private network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under private network
-        5. Validate new nova instance is created on nova service
-        """
-        test_netA_name = 'vtn_test_28_netA_private'
-        test_sub_netA_cidr = ["private","10.160.160.192/26",'10.160.160.193']
-        netA_instance_vm_name = 'vtn_test_28_nova_netA_instance_private_net'
-        test_netB_name = 'vtn_test_28_netB_private'
-        test_sub_netB_cidr = ["private","10.160.161.192/26",'10.160.161.193']
-        netB_instance_vm_name = 'vtn_test_28_nova_netB_instance_private_net'
-        test_management_type = "private"
-        image_name = "vsg-1.1"
-        flavor_id = 'm1.small'
-        netA_instance_vm_details = [netA_instance_vm_name, image_name, flavor_id]
-        netB_instance_vm_details = [netB_instance_vm_name, image_name, flavor_id]
-
-        nova_netA, nova_instance_details_netA = self.create_net_subnet_nova_instance(test_netA_name, test_sub_netA_cidr, netA_instance_vm_details, test_management_type)
-        nova_netB, nova_instance_details_netB = self.create_net_subnet_nova_instance(test_netB_name, test_sub_netB_cidr, netB_instance_vm_details, test_management_type)
-
-        assert_equal(nova_instance_details_netA.status, 'ACTIVE')
-        assert_equal(nova_instance_details_netB.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print 'New nova instance ip addresses are '
-        print nova_instance_details_netA.addresses
-        print nova_instance_details_netB.addresses
-        address_1st_instance = nova_instance_details_netA.addresses
-        address_2nd_instance = nova_instance_details_netB.addresses
-        print 'Nova 1st instance private ip = %s'%(address_1st_instance[test_netA_name][0]['addr'])
-        print 'Nova 2nd instance private ip = %s'%(address_2nd_instance[test_netB_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address_1st_instance[test_netA_name][0]['addr'],source_tenants_details =address_2nd_instance[test_netB_name][0]['addr'], check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova_netA, nova_instance_details_netA)
-        self.nova_instance_deletion(nova_netB, nova_instance_details_netB)
-        time.sleep(5)
-        self.neutron_network_deletion(test_netA_name)
-        self.neutron_network_deletion(test_netB_name)
-        assert_equal(status, False)
-
-    def test_cordvtn_creating_management_and_public_network_instances_and_validate_connectivity_from_host_machine_or_compute_node_and_validate_connectivity_to_internet(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_two_networks_name = ['vtn_test_29_net_management','vtn_test_29_net_public']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["public","10.6.1.192/26",'10.6.1.193']]
-        test_management_type = ["management_local", 'public']
-        instance_vm_name = 'vtn_test_29_nova_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_network_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s and public ip %s'%(address[test_two_networks_name[0]][0]['addr'],address[test_two_networks_name[1]][0]['addr'])
-        print address[test_two_networks_name[0]][0]['addr']
-        print nova.security_groups.list()
-        print address[test_two_networks_name[1]][0]['addr']
-        print nova.security_groups.list()
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_management_and_public_network_instance_with_and_without_pause_and_validate_connectivity_from_host_machine_or_compute_node_and_validate_connectivity_to_internet(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now pause the nova instance and check connectivity
-        8. Now unpause the nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_30_net_management','vtn_test_30_net_public']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["public","10.6.1.192/26",'10.6.1.193']]
-        test_management_type = ["management_local", 'public']
-        instance_vm_name = 'vtn_test_30_nova_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s and public ip %s'%(address[test_two_networks_name[0]][0]['addr'],address[test_two_networks_name[1]][0]['addr'])
-        print address[test_two_networks_name[0]][0]['addr']
-        print nova.security_groups.list()
-        print address[test_two_networks_name[1]][0]['addr']
-        print nova.security_groups.list()
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_instance_details.pause()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_instance_details.unpause()
-        print 'Nova instance is paused and unpasued now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_management_and_public_network_instance_doing_suspend_and_resume_validating_connectivity_from_host_machine_or_compute_node_and_validate_connectivity_to_internet(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now suspend the nova instance and check connectivity
-        8. Now resume the nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_31_net_management','vtn_test_31_net_public']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["public","10.6.1.192/26",'10.6.1.193']]
-        test_management_type = ["management_local", 'public']
-        instance_vm_name = 'vtn_test_31_nova_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s and public ip %s'%(address[test_two_networks_name[0]][0]['addr'],address[test_two_networks_name[1]][0]['addr'])
-        print address[test_two_networks_name[0]][0]['addr']
-        print nova.security_groups.list()
-        print address[test_two_networks_name[1]][0]['addr']
-        print nova.security_groups.list()
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_instance_details.suspend()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_instance_details.resume()
-        print 'Nova instance is suspend and resumed now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_public_network_instance_with_stopping_and_starting_instances_and_checking_connectvity_from_host_machine_or_compute_node_and_validate_connectivity_to_internet(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now stop the nova instance and check connectivity
-        8. Now start the nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_32_net_management','vtn_test_32_net_public']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["public","10.6.1.192/26",'10.6.1.193']]
-        test_management_type = ["management_local", 'public']
-        instance_vm_name = 'vtn_test_32_nova_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s and public ip %s'%(address[test_two_networks_name[0]][0]['addr'],address[test_two_networks_name[1]][0]['addr'])
-        print address[test_two_networks_name[0]][0]['addr']
-        print nova.security_groups.list()
-        print address[test_two_networks_name[1]][0]['addr']
-        print nova.security_groups.list()
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_instance_details.stop()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_instance_details.start()
-        print 'Nova instance is stopped and started now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_private_network_instance_and_validate_connectivity_from_host_machine_or_compute_node_and_validate_connectivity_to_internet(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_two_networks_name = ['vtn_test_33_net_management','vtn_test_33_net_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193']]
-        test_management_type = ["management_local", 'private']
-        instance_vm_name = 'vtn_test_33_nova_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s and private ip %s'%(address[test_two_networks_name[0]][0]['addr'],address[test_two_networks_name[1]][0]['addr'])
-        print address[test_two_networks_name[0]][0]['addr']
-        print nova.security_groups.list()
-        print address[test_two_networks_name[1]][0]['addr']
-        print nova.security_groups.list()
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-        status_1, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_to_external")
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, False)
-
-    def test_cordvtn_creating_mgmt_and_private_network_with_two_instances_and_validate_connectivity_from_host_machine_or_compute_node_and_check_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_two_networks_name = ['vtn_test_34_net_management','vtn_test_34_net_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193']]
-        test_management_type = ["management_local", 'private']
-        first_instance_vm_name = 'vtn_test_34_nova_first_instance_management_net'
-        second_instance_vm_name = 'vtn_test_34_nova_second_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[1]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_private_network_with_two_instances_with_and_without_pause_validating_connectivity_from_host_machine_or_compute_node_and_validating_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now pause one of the nova instance and check connectivity
-        8. Now start the same nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_35_net_management','vtn_test_35_net_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193']]
-        test_management_type = ["management_local", 'private']
-        first_instance_vm_name = 'vtn_test_35_nova_first_instance_management_net'
-        second_instance_vm_name = 'vtn_test_35_nova_second_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[1]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_first_instance_details.pause()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_first_instance_details.unpause()
-        print 'Nova instance is paused and unpased now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_private_network_with_two_instances_and_doing_suspend_and_resume_validating_connectivity_from_host_machine_or_compute_node_and_validating_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now suspend one of the nova instance and check connectivity
-        8. Now resume the same nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_36_net_management','vtn_test_36_net_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193']]
-        test_management_type = ["management_local", 'private']
-        first_instance_vm_name = 'vtn_test_36_nova_first_instance_management_net'
-        second_instance_vm_name = 'vtn_test_36_nova_second_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[1]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[1]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_first_instance_details.suspend()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_first_instance_details.resume()
-        print 'Nova instance is suspend and resume now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_private_network_with_two_instances_applying_stop_and_start_validating_connectivity_from_host_machine_or_compute_node_and_validating_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Now stop one of the nova instance and check connectivity
-        8. Now start the same nova instance and check connectivity
-        """
-        test_two_networks_name = ['vtn_test_37_net_management','vtn_test_37_net_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193']]
-        test_management_type = ["management_local", 'private']
-        first_instance_vm_name = 'vtn_test_37_nova_first_instance_management_net'
-        second_instance_vm_name = 'vtn_test_37_nova_second_instance_management_net'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,2):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(test_two_networks_name,nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[1]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-        print nova.security_groups.list()
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is False or status_2 is False:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-        new_first_instance_details.stop()
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        if status_1 is True or status_2 is True:
-           self.nova_instance_deletion(nova, new_first_instance_details)
-           self.nova_instance_deletion(nova, new_second_instance_details)
-           time.sleep(3)
-           self.neutron_network_deletion(test_two_networks_name[0])
-           self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, False)
-        assert_equal(status_2, False)
-        new_first_instance_details.start()
-        print 'Nova instance is stopped and started now checking connectivity'
-        time.sleep(60)
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_creating_mgmt_and_two_private_network_with_each_instances_and_validate_connectivity_from_host_machine_or_compute_node_and_check_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Verify ping is getting successful after ssh toof one instance to other nova instance which is created in step 4.
-        """
-        test_two_networks_name = ['vtn_test_39_net_management','vtn_test_39_netA_private','vtn_test_39_netB_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193'], ["private","10.160.161.192/26",'10.160.161.193']]
-        test_management_type = ["management_local", 'private','private']
-        first_instance_vm_name = 'vtn_test_39_nova_first_instance_management_netA'
-        second_instance_vm_name = 'vtn_test_39_nova_second_instance_management_netB'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,3):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(['vtn_test_39_net_management','vtn_test_39_netA_private'],nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(['vtn_test_39_net_management','vtn_test_39_netB_private'],nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[2]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, False)
-
-    def test_cordvtn_service_dependency_without_xos_creating_mgmt_and_two_private_network_with_each_instances_and_validate_connectivity_from_host_machine_or_compute_node_and_check_connectivity_to_other_instance(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        7. Verify ping is getting successful after ssh toof one instance to other nova instance which is created in step 4.
-        """
-        test_two_networks_name = ['vtn_test_40_net_management','vtn_test_40_netA_private','vtn_test_40_netB_private']
-        test_two_sub_networks_cidr = [["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"], ["private","10.160.160.192/26",'10.160.160.193'], ["private","10.160.161.192/26",'10.160.161.193']]
-        test_management_type = ["management_local", 'private','private']
-        first_instance_vm_name = 'vtn_test_40_nova_first_instance_management_netA'
-        second_instance_vm_name = 'vtn_test_40_nova_second_instance_management_netB'
-#        image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        for test_net_name in test_two_networks_name:
-            result = self.neutron_network_creation_and_validation(test_net_name)
-            assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        #for test_net_name,test_sub_net_cidr in test_two_networks_name test_two_sub_networks_cidr:
-        for i in range(0,3):
-           networks = neutron.list_networks(name=test_two_networks_name[i])
-           network_id = self.get_key_value(d=networks, key = 'id')
-           sub_result = self.neutron_subnet_creation_and_validation(test_two_networks_name[i],test_two_sub_networks_cidr[i])
-           assert_equal(sub_result[0], True)
-           net_type_post = self.sub_network_type_post_to_onos(test_two_networks_name[i], test_management_type[i])
-           print net_type_post
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        print nova.security_groups.list()
-        new_first_instance_details = self.nova_instance_creation_and_validation(['vtn_test_40_net_management','vtn_test_40_netA_private'],nova,first_instance_vm_name,image_name,flavor_id)
-        new_second_instance_details = self.nova_instance_creation_and_validation(['vtn_test_40_net_management','vtn_test_40_netB_private'],nova,second_instance_vm_name,image_name,flavor_id)
-        time.sleep(60)
-        assert_equal(new_first_instance_details.status, 'ACTIVE')
-        assert_equal(new_second_instance_details.status, 'ACTIVE')
-
-        service_dependency_post = self.service_dependency_on_network_post_to_onos(test_two_networks_name[1],test_two_networks_name[2],test_management_type[1])
-        print service_dependency_post
-        compute_details = self.get_compute_nodes()
-        first_instance_address = new_first_instance_details.addresses
-        second_instance_address = new_second_instance_details.addresses
-        print 'Nova first instance management ip = %s and private ip %s'%(first_instance_address[test_two_networks_name[0]][0]['addr'],first_instance_address[test_two_networks_name[1]][0]['addr'])
-        print 'Nova second instance management ip = %s and private ip %s'%(second_instance_address[test_two_networks_name[0]][0]['addr'],second_instance_address[test_two_networks_name[2]][0]['addr'])
-        secgroup = nova.security_groups.find(name="default")
-#        nova.security_group_rules.create(secgroup.id,ip_protocol="tcp",
-                                     #from_port="22",
-                                     #to_port="22",
-                                    # cidr="0.0.0.0/0",)
-
- #       nova.security_group_rules.create(secgroup.id,
-                                    # ip_protocol="icmp",
-                                    # from_port=-1,
-                                    # cidr="0.0.0.0/0",
-                                    # to_port=-1)
-        print nova.security_groups.list()
-
-        status_1, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[0]][0]['addr'])
-        status_2, output = self.nova_instance_tenants_access_check(first_instance_address[test_two_networks_name[1]][0]['addr'],source_tenants_details = second_instance_address[test_two_networks_name[0]][0]['addr'],check_type = "Ping_from_source_tenant")
-        self.nova_instance_deletion(nova, new_first_instance_details)
-        self.nova_instance_deletion(nova, new_second_instance_details)
-        time.sleep(3)
-        self.neutron_network_deletion(test_two_networks_name[0])
-        self.neutron_network_deletion(test_two_networks_name[1])
-        assert_equal(status_1, True)
-        assert_equal(status_2, True)
-
-    def test_cordvtn_management_network_instance_and_validate_connectivity_from_host_machine_or_compute_node_after_br_int_bridge_is_down(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_41_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_41_nova_instance_management_net'
-        #image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        if status is False:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(5)
-           self.neutron_network_deletion(test_net_name)
-        assert_equal(status, True)
-        cmd = 'sudo ifconfig br-int down'
-        #compute_details = self.get_compute_nodes()
-        compute_details = '10.1.0.17'
-        ssh_agent = SSHTestAgent(host = compute_details)
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        print output
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        cmd = 'sudo ifconfig br-int up'
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        assert_equal(status, False)
-
-    def test_cordvtn_management_network_instance_and_validate_connectivity_from_host_machine_or_compute_node_toggling_br_int_bridge(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_42_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_42_nova_instance_management_net'
-        #image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        cmd = 'sudo ifconfig br-int down'
-        #compute_details = self.get_compute_nodes()
-        compute_details = '10.1.0.17'
-        ssh_agent = SSHTestAgent(host = compute_details)
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        print output
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        if status is True:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(5)
-           self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-        cmd = 'sudo ifconfig br-int up'
-        #compute_details = self.get_compute_nodes()
-        compute_details = '10.1.0.17'
-        ssh_agent = SSHTestAgent(host = compute_details)
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        print output
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, True)
-
-    def test_cordvtn_management_network_instance_and_validate_connectivity_from_host_machine_or_compute_node_checking_onos_flows(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_43_net_management'
-        test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_43_nova_instance_management_net'
-        #image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-        cmd = 'sudo ifconfig br-int down'
-        #compute_details = self.get_compute_nodes()
-        compute_details = '10.1.0.17'
-        ssh_agent = SSHTestAgent(host = compute_details)
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        print output
-        result = self.neutron_network_creation_and_validation(test_net_name)
-        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-        assert_equal(sub_result[0], True)
-        net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-        assert_equal(new_instance_details.status, 'ACTIVE')
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        self.cliEnter()
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        print flows['IPV4_DST']
-        self.cliExit()
-
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        if status is True:
-           self.nova_instance_deletion(nova, new_instance_details)
-           time.sleep(5)
-           self.neutron_network_deletion(test_net_name)
-        assert_equal(status, False)
-        cmd = 'sudo ifconfig br-int up'
-        #compute_details = self.get_compute_nodes()
-        compute_details = '10.1.0.17'
-        ssh_agent = SSHTestAgent(host = compute_details)
-        status, output = ssh_agent.run_cmd(cmd, timeout = 5)
-        print output
-        self.cliEnter()
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        print flows
-        self.cliExit()
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, True)
-        self.cliEnter()
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        print flows
-        self.cliExit()
-
-        ##### We can't test port-create scenarios on CiaB setup.  #### To-DO
-    def test_cordvtn_creating_vtn_with_vlan_port_connectivity_and_validate_connectivity_from_host_machine_or_compute_node(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Create subnetwork whose ip is under management network
-        3. Do GET Rest API and validate creation of network
-        4. Create new nova instance under management network
-        5. Validate new nova instance is created on nova service
-        6. Verify ping is getting successful from compute node to nova instance which is created in step 4.
-        """
-        test_net_name = 'vtn_test_41_net_vlan_port'
-#       test_sub_net_cidr = ["management","172.27.0.0/24", "172.27.0.20", "172.27.0.21"]
-#        test_management_type = "management_local"
-        instance_vm_name = 'vtn_test_41_nova_instance_vlan_port_net'
-        #image_name = "vsg-1.1"
-        image_name = "trusty-server-multi-nic"
-        flavor_id = 'm1.small'
-#        result = self.neutron_network_creation_and_validation(test_net_name)
-#        assert_equal(result, True)
-        neutron_creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**neutron_creds)
-        networks = neutron.list_networks(name=test_net_name)
-        network_id = self.get_key_value(d=networks, key = 'id')
-#        sub_result = self.neutron_subnet_creation_and_validation(test_net_name,test_sub_net_cidr)
-#        assert_equal(sub_result[0], True)
- #       net_type_post = self.sub_network_type_post_to_onos(test_net_name, test_management_type)
-        creds = get_nova_credentials()
-        nova = nova_client.Client('2', **creds)
-#        new_instance_details = self.nova_instance_creation_and_validation(test_net_name,nova,instance_vm_name,image_name,flavor_id)
-#        assert_equal(new_instance_details.status, 'ACTIVE')
-        #body_port_details = {"port": {"admin_state_up" :"True","device_id" :new_instance_details.id, "name":"stag-100","network_id":network_id}}
-        body_port_details = {"port": {"admin_state_up" :"True","device_id" :"", "name":"stag-100","network_id":network_id}}
-        response = neutron.create_port(body=body_port_details)
-        print(response)
-        """
-        compute_details = self.get_compute_nodes()
-        print new_instance_details.addresses
-        address = new_instance_details.addresses
-        print 'Nova instance management ip = %s'%(address[test_net_name][0]['addr'])
-        time.sleep(60)
-        status, output = self.nova_instance_tenants_access_check(address[test_net_name][0]['addr'])
-        self.nova_instance_deletion(nova, new_instance_details)
-        time.sleep(5)
-        self.neutron_network_deletion(test_net_name)
-        assert_equal(status, True)
-        """
-
-    def test_cordvtn_with_neutron_network_creation_and_validation_on_head_node_with_neutron_service(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "Net-1","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        networks = neutron.list_networks(name='Net-1')
-        vtn_util = vtn_validation_utils('')
-        data = networks
-        result = self.search_value(data, "Net-1")
-        assert_equal(result, True)
-
-    def test_cordvtn_neutron_network_creation_and_validation_on_onos(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "Net-1","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        networks = neutron.list_networks(name='Net-1')
-        vtn_util = vtn_validation_utils('')
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "Net-1")
-        assert_equal(result, True)
-
-    def test_cordvtn_neutron_network_deletion_and_validation_on_neutron_openstack(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "Net-1","admin_state_up":False}}
-        net = neutron.delete_network("Net-1")
-        networks = neutron.list_networks(name='Net-1')
-        vtn_util = vtn_validation_utils('')
-        data = networks
-        result = self.search_value(data, "Net-1")
-        assert_equal(result, True)
-
-    def test_cordvtn_neutron_network_sync(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created network in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "Test-Net-1","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        url = "http://{0}:8181/onos/cordvtn/serviceNetworks".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-        body_create_subnet = {'subnets': [{'cidr': '192.168.199.0/24',
-                             'ip_version': 4, 'network_id': network_id}]}
-
-        subnet = neutron.create_subnet(body=body_create_subnet)
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, "Test-Net-1")
-        assert_equal(result, True)
-
-    def test_cordvtn_neutron_port_sync(self):
-        """
-        Test Method:
-        0. Create Test-Net,
-        1. Load cordvtn config, vtn-cfg-1.json to cord-onos
-        2. Run sync command for cordvtn
-        3. Do GET Rest API and validate creation of network
-        4. Validate network synch with created port in cord-onos
-        """
-        creds = self.get_neutron_credentials()
-        neutron = neutronclient.Client(**creds)
-        body_example = {"network":{"name": "Test-Net-1","admin_state_up":True}}
-        net = neutron.create_network(body=body_example)
-        network_id = net['network']['id']
-        device_id = 'of:{}'.format(get_mac(self.switch))
-        body_example = {'port': {'admin_state_up': True,'device_id':device_id, 'network_id':network_id}}
-        response = neutron.create_port(body=body_example)
-        url = "http://{0}:8181/onos/cordvtn/servicePorts".format(vtn_util.endpoint)
-        auth = ('karaf','karaf')
-
-        resp = requests.get(url=url, auth=auth)
-        data = json.loads(resp.text)
-        result = self.search_value(data, device_id)
-        assert_equal(result, True)
-
-    def test_cordvtn_creating_virtual_private_network(self):
-	"""
-	Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Verify that NetA is being created and validate IP in nova list command.
-	    5) Verify that flow is being added in ovs-switch in compute-node.
-	    6) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	"""
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network(self):
-	"""
-	Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Verify that NetA is being created and validate IP in nova list command.
-	    5) Verify that flow is being added in ovs-switch in compute-node.
-	    6) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	"""
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network(self):
-	"""
-	Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Verify that NetA is being created and validate IP in nova list command.
-	    5) Verify that flow is being added in ovs-switch in compute-node.
-	    6) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	"""
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network(self):
-	"""
-	Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Verify that NetA is being created and validate IP in nova list command.
-	    5) Verify that flow is being added in ovs-switch in compute-node.
-	    6) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	"""
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network_and_boot_image(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network_and_boot_image(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_2_images_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_2_images_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network_and_boot_2_images_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network_and_boot_2_images_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_connectivity_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_floating_IP_with_vlan_connectivity_network_and_boot_image_connectivity_negative_scenario(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_private_network_and_boot_image_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and private ip on VM.
-	    8) Create a same virtual interface with valn tag and  private ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_public_network_and_boot_image_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and public ip on VM.
-	    8) Create a same virtual interface with valn tag and any pulic ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_local_management_network_and_boot_image_connectivity(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_private_network_and_boot_image_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and private floating ip on VM.
-	    8) Create a same virtual interface with valn tag and  private floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_public_network_and_boot_image_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and public floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any pulic floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_local_management_network_and_boot_image_connectivity(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	    10) verify that ping is successful
-	    11) Verify that flow is being added in ovs-switch in compute-node.
-	    12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    13) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_public_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the private network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_local_management_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the private network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_vlan_connectivity_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the private network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_floating_IP_with_vlan_connectivity_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the private network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_local_management_other_public_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_vlan_connectivity_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_floating_IP_with_vlan_connectivity_and_a_private_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_vlan_connectivity_other_local_management_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_floating_IP_with_vlan_connectivity_other_local_management_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_one_virtual_floating_IP_with_vlan_connectivity_other_virtual_vlan_network_and_boot_2_images_in_same_service_connectivity_negative_scenario(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in the public network
-	    8) verify that ping is not successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	    10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    11) Verify that cord-onos pushed flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_2_images_with_invalid_public_field_of_onos_network_cfg_json_in_same_service(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push network_cfg.json config file to onos with an invalid public gateway ip in network_cfg.json file.
-	    4) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Verify ping from VM to public gateway which is send to ONOS through rest API in network_cfg.json file.
-	   12) 11th step should be failed due to we are passing invalid public IP as gatway and we have not seen any flows in OVS for it.
-	   13) Now ping one VM to other VM it should not ping again even it in the same service.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_with_invalid_localManagementIp_field_of_onos_network_cfg_json(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push network_cfg.json config file to onos with an invalid localManagement ip in network_cfg.json file.
-	    4) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    5) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    6) Wait till VM boots up and starts running.
-	    7) Verify that a VM is launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Verify ping from VM to local management ip which is send to ONOS through rest API in network_cfg.json file.
-	   12) 11th step should be failed due to we are passing invalid local management IP and we have not seen any flows in OVS for it.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image_with_invalid_OVSDB_port_field_of_onos_network_cfg_json(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push network_cfg.json config file to onos with an invalid ovsdb port in network_cfg.json file.
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that flows are  being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos did not push any flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image_with_invalid_OpenStack_details_in_onos_network_cfg_json(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push network_cfg.json config file to onos with an invalid openstack in network_cfg.json file.
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that no flows are being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin is not being received a message from openstack service neutron.
-	    9) Verify that cord-onos did not push any flows to OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_image_with_invalid_compute_node_details_in_onos_network_cfg_json(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push network_cfg.json config file to onos with an invalid compute node details in network_cfg.json file.
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Verify that no flows are being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin is not being received a message from openstack service neutron.
-	    9) Verify that cord-onos did not push any flows to OVS switch.
-        """
-	pass
-
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_in_different_services_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetB with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-	   10) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   11) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_public_networks_and_boot_images_in_different_service_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetB with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	       (neutron net-create net-A-public, neutron subnet-create net-B-public 198.1.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-	   10) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   11) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_local_management_networks_and_boot_images_in_different_service_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetB with an IP as local management network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.28.0.0/24 -gateway 172.28.0.1).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-	   10) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   11) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_vlan_connectivity_networks_and_boot_images_in_different_service_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetB with a vlan port-create.
-	       (neutron port-create net-A-private --name stag-100).
-	       (neutron port-create net-B-private --name stag-200).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg1-01
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-	   10) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   11) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-    def test_cordvtn_creating_two_virtual_floating_IP_with_vlan_connectivity_networks_and_boot_images_in_different_service_connectivity(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetB with a floating ip and vlan port-create.
-	       (neutron port-create net-A-private --name stag-500).
-	       (neutron port-create net-B-private --name stag-500).
-	    4) Now boot two images in the same created network using nova boot image command (example given below :-
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-01
-		   nova boot --image 6ba954df-063f-4379-9e2a-920050879918 --flavor 2 --nic port-id=2c7a397f-949e-4502-aa61-2c9cefe96c74 --user-data passwd.data vsg-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Verify that flow is being added in ovs-switch in compute-node.
-	    8) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	    9) Verify that cord-onos pushed flows to OVS switch.
-	   10) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   11) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_for_services_dependency_with_out_xos_direct_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push service dependency data.json file to onos to subscriber of other service.
-		$ curl -X POST -H "Content-Type: application/json" -u onos:rocks -d @data.json http://$OC1:8181/onos/cordvtn/serviceNetworks
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-A to other VM which is in Net-B, should ping.
-	   12) Verify that flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_for_services_dependency_with_out_xos_indirect_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push service dependency data.json file to onos to subscriber of other service.
-		$ curl -X POST -H "Content-Type: application/json" -u onos:rocks -d @data.json http://$OC1:8181/onos/cordvtn/serviceNetworks
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-B to other VM which is in Net-A, capture packets on port for ICMP request packets.
-	   12) Verify that flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_remove_services_dependency_with_out_xos_direct_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push service dependency data.json file to onos to subscriber of other service.
-		$ curl -X POST -H "Content-Type: application/json" -u onos:rocks -d @data.json http://$OC1:8181/onos/cordvtn/serviceNetworks
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-A to other VM which is in Net-B, should ping.
-	   12) Verify that flows are being added in the OVS switch.
-	   13) Push config data with outservice dependency in data.json file to onos to subscriber of other service.
-	   14) Now ping from VM which is Net-A to other VM which is in Net-B, should not ping.
-	   15) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_remove_services_dependency_with_out_xos_indirect_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push service dependency data.json file to onos to subscriber of other service.
-		$ curl -X POST -H "Content-Type: application/json" -u onos:rocks -d @data.json http://$OC1:8181/onos/cordvtn/serviceNetworks
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-B to other VM which is in Net-A, capture packets on port for ICMP request packets.
-	   12) Verify that flows are being added in the OVS switch.
-	   13) Push config data with out service dependency in data.json file to onos to subscriber of other service.
-	   14) Now ping from VM which is Net-B to other VM which is in Net-A, should not see any ICMP request packets on port.
-	   15) Verify that no flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_for_services_dependency_with_xos_direct_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Validate that XOS is up and running.
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-A to other VM which is in Net-B, should ping.
-	   12) Verify that flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_creating_two_virtual_private_networks_and_boot_images_for_services_dependency_with_xos_indirect_access(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Validate that XOS is up and running.
-	    4) From CORD-Test container, use python-neutron client and create two networks with name - NetA and NetBwith an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	       (neutron net-create net-B-private, neutron subnet-create net-B-private 10.1.0.0/24).
-	    5) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-B-vm-01
-	    6) Wait till VMs boot up and running.
-	    7) Verify that two VMs are launched and running by using novaclient python API.
-	    8) Verify that flow is being added in ovs-switch in compute-node.
-	    9) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   10) Verify that cord-onos pushed flows to OVS switch.
-	   11) Now ping from VM which is Net-B to other VM which is in Net-A, should ping.
-	   12) Verify that flows are being added in the OVS switch.
-        """
-	pass
-
-    def test_cordvtn_with_access_agent_serviceType_and_vtn_location_field_network_cfg_connectivity_to_access_device(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push access-agent additional network_cfg to ONOS and specify vtn-location field info must be access-agent container.
-	    4) Launch the access-agent and access-device containers and then restart openstack compute node.
-	       $ sudo docker run --privileged --cap-add=ALL -d --name access-agent -t ubuntu:14.04 /bin/bash
-	    5) Create each interface on br-int and br-mgmt using pipework on access-agent containers
-	       $ sudo ./pipework br-mgmt -i eth1 access-agent 10.10.10.20/24
-	       $ sudo ./pipework br-int -i eth2 access-agent 10.168.0.100/24 fa:00:00:00:00:11
-	    6) We ahve to stop ONOS service to test this
-		 onos-service stop
-		 sudo ovs-ofctl -O OpenFlow13 del-flows br-int "arp"
-	    7) Now attach to access-agent container and ping to access-device
-	    8) Verify that ping should be success and flows are being added in br-int.
-        """
-	pass
-
-    def test_cordvtn_with_access_agent_serviceType_and_vtn_location_field_in_network_cfg_connectivity_to_head_node(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push access-agent additional network_cfg to ONOS and specify vtn-location field info must be access-agent container.
-	    4) Launch the access-agent and access-device containers and then restart openstack compute node.
-	       $ sudo docker run --privileged --cap-add=ALL -d --name access-agent -t ubuntu:14.04 /bin/bash
-	    5) Create each interface on br-int and br-mgmt using pipework on access-agent containers
-	       $ sudo ./pipework br-mgmt -i eth1 access-agent 10.10.10.20/24
-	       $ sudo ./pipework br-int -i eth2 access-agent 10.168.0.100/24 fa:00:00:00:00:11
-	    6) We ahve to stop ONOS service to test this
-		 onos-service stop
-		 sudo ovs-ofctl -O OpenFlow13 del-flows br-int "arp"
-	    7) Now attach to access-agent container and ping to head node
-	    8) Verify that ping should be success and flows are being added in br-int.
-        """
-	pass
-
-    def test_cordvtn_with_access_agent_serviceType_and_invalid_vtn_location_field_network_cfg_connectivity_to_access_device(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push access-agent additional network_cfg to ONOS and specify vtn-location field info must not be access-agent container.
-	    4) Launch the access-agent and access-device containers and then restart openstack compute node.
-	       $ sudo docker run --privileged --cap-add=ALL -d --name access-agent -t ubuntu:14.04 /bin/bash
-	    5) Create each interface on br-int and br-mgmt using pipework on access-agent containers
-	       $ sudo ./pipework br-mgmt -i eth1 access-agent 10.10.10.20/24
-	       $ sudo ./pipework br-int -i eth2 access-agent 10.168.0.100/24 fa:00:00:00:00:11
-	    6) We ahve to stop ONOS service to test this
-		 onos-service stop
-		 sudo ovs-ofctl -O OpenFlow13 del-flows br-int "arp"
-	    7) Now attach to access-agent container and ping to access-device
-	    8) Verify that ping should not be success and no flows are being added in br-int.
-        """
-	pass
-
-    def test_cordvtn_with_access_agent_serviceType_and_invalid_vtn_location_field_in_network_cfg_connectivity_to_head_node(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) Push access-agent additional network_cfg to ONOS and specify vtn-location field info must not be access-agent container.
-	    4) Launch the access-agent and access-device containers and then restart openstack compute node.
-	       $ sudo docker run --privileged --cap-add=ALL -d --name access-agent -t ubuntu:14.04 /bin/bash
-	    5) Create each interface on br-int and br-mgmt using pipework on access-agent containers
-	       $ sudo ./pipework br-mgmt -i eth1 access-agent 10.10.10.20/24
-	       $ sudo ./pipework br-int -i eth2 access-agent 10.168.0.100/24 fa:00:00:00:00:11
-	    6) We ahve to stop ONOS service to test this
-		 onos-service stop
-		 sudo ovs-ofctl -O OpenFlow13 del-flows br-int "arp"
-	    7) Now attach to access-agent container and ping to head node
-	    8) Verify that ping should not be success and no flows are being added in br-int.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service_connectivity_after_restarting_VMs(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart both VMs in same service and repeat steps 7 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service_connectivity_after_restarting_cord_onos(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart ONOS service and repeat steps 7 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service_connectivity_after_delete_any_VM_recreating_it(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete a VM which was created earlier and repeat steps 4 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_private_network_and_boot_2_images_in_same_service_connectivity_after_delete_and_add_br_int_bridge(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as private network.
-	       (neutron net-create net-A-private, neutron subnet-create net-A-private 10.0.0.0/24).
-	    4) Now boot 2 images in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-02
-	    5) Wait till VMs boot up and running.
-	    6) Verify that two VMs are launched and running by using novaclient python API.
-	    7) Now ping to the VM from other VM which are launched in same network
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete a br_int bridge and repeat steps 7 to 11, (it should not ping)
-	   13) Add br_int bridge and repeat steps 7 to 11, (it should ping)
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity_after_restarting_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart the VM in service and repeat steps 7 to 11.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity_after_restarting_cord_onos(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart onos service container and repeat steps 7 to 11.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity_after_delete_and_recreate_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete and re-create a VM in the same service and repeat steps 7 to 11.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_public_network_and_boot_image_connectivity_after_delete_and_add_br_int_bridge(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as public network.
-	       (neutron net-create net-A-public, neutron subnet-create net-A-public 198.0.0.0/24).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		$ nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from outside network which are internet network (global ping)
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete a br_int bridge and repeat steps 7 to 11, (it should not ping)
-	   13) Add br_int bridge and repeat steps 7 to 11, (it should ping)
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity_after_restarting_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart the VM in service and repeat steps 7 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity_after_restarting_cord_onos(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Restart the onos service and repeat steps 7 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity_after_delete_and_recreate_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete and re-create a VM in the same service and repeat steps 7 to 11.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_local_management_network_and_boot_image_connectivity_after_delete_and_add_br_int_bridge(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Now ping to the VM from compute node network which are launched a VM.
-	    8) verify that ping is successful
-	    9) Verify that flow is being added in ovs-switch in compute-node.
-	   10) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   11) Verify that cord-onos pushed flows to OVS switch.
-	   12) Delete a br_int bridge and repeat steps 7 to 11, (it should not ping)
-	   13) Add br_int bridge and repeat steps 7 to 11, (it should ping)
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_local_management_network_and_boot_image_connectivity_after_restarting_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Restart the VM in service and repeat steps 9 to 13.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_local_management_network_and_boot_image_connectivity_after_restarting_cord_onos(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Restart the ONOS service and repeat steps 9 to 13.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_local_management_network_and_boot_image_connectivity_after_delete_and_recreate_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Delete and re-create a VM in service and repeat steps 9 to 13.
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_local_management_network_and_boot_image_connectivity_after_delete_and_add_br_int_bridge(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Delete a br_int bridge and repeat steps 9 to 13, (it should not ping)
-	   15) Add br_int bridge and repeat steps 9 to 13, (it should ping)
-
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_local_management_network_and_boot_image_connectivity_after_restarting_VM(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Restart the VM in service and repeat steps 9 to 13.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_local_management_network_and_boot_image_connectivity_after_restarting_cord_onos(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Restart the ONOS service and repeat steps 9 to 13.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_local_management_network_and_boot_image_connectivity_after_delete_and_recreate_VM(self):
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Delete and re-create a VM in service and repeat steps 9 to 13.
-        """
-	pass
-
-    def test_cordvtn_creating_virtual_vlan_interface_floating_local_management_network_and_boot_image_connectivity_after_delete_and_add_br_int_bridge(self):
-
-        """
-	 Test Method:
-	    1) Validate that required openstack service is up and running.
-	    2) Validate that compute node is being created and get compute node name by using "sudo cord prov list".
-	    3) From CORD-Test container, use python-neutron client and create network with name - NetA with an IP as local management  network.
-	       (neutron net-create net-A-management, neutron subnet-create net-A-management 172.27.0.0/24 -gateway 172.27.0.1).
-	    4) Now boot image in the same created network using nova boot image command (example given below :-
-		 nova boot --image 3e2d7760-774a-4a16-be07-aaccafa779b6 --flavor 1 --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de --nic net-id=8bc19377-f493-4cad-af23-45fb299da9de net-A-vm-01
-	    5) Wait till VM boots up and starts running.
-	    6) Verify that a VM is launched and running by using novaclient python API.
-	    7) Create a virtual interface with vlan tag and local management floating ip on VM.
-	    8) Create a same virtual interface with valn tag and any local management floating ip on head node.
-	    9) Now ping to the VM from head node network which are launched a openstack service.
-	   10) verify that ping is successful
-	   11) Verify that flow is being added in ovs-switch in compute-node.
-	   12) Verify that onos-ml2 plugin syncs through ReST call  from openstack service neutron.
-	   13) Verify that cord-onos pushed flows to OVS switch.
-	   14) Delete a br_int bridge and repeat steps 9 to 13, (it should not ping)
-	   15) Add br_int bridge and repeat steps 9 to 13, (it should ping)
-        """
-	pass
-
diff --git a/src/test/cordvtn/credentials.py b/src/test/cordvtn/credentials.py
deleted file mode 100644
index 1035b7d..0000000
--- a/src/test/cordvtn/credentials.py
+++ /dev/null
@@ -1,72 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-
-def get_credentials():
-    n = {}
-    n['username'] = os.environ['OS_USERNAME']
-    n['password'] = os.environ['OS_PASSWORD']
-    n['auth_url'] = os.environ['OS_AUTH_URL']
-    n['tenant_name'] = os.environ['OS_TENANT_NAME']
-    return n
-
-def get_cinder_credentials():
-    n = [os.environ['OS_USERNAME'], os.environ['OS_PASSWORD'],
-         os.environ['OS_TENANT_NAME'], os.environ['OS_AUTH_URL']]
-    return n
-
-def get_ceilo_credentials():
-    n = {}
-    n['os_username'] = os.environ['OS_USERNAME']
-    n['os_password'] = os.environ['OS_PASSWORD']
-    n['os_auth_url'] = os.environ['OS_AUTH_URL']
-    n['os_tenant_name'] = os.environ['OS_TENANT_NAME']
-    return n
-
-def get_nova_credentials():
-    n = {}
-    n['username'] = os.environ['OS_USERNAME']
-    n['api_key'] = os.environ['OS_PASSWORD']
-    n['auth_url'] = os.environ['OS_AUTH_URL']
-    n['project_id'] = os.environ['OS_TENANT_NAME']
-    return n
-
-def get_nova_credentials_v2():
-    n = {}
-    n['username'] = os.environ['OS_USERNAME']
-    n['api_key'] = os.environ['OS_PASSWORD']
-    n['auth_url'] = os.environ['OS_AUTH_URL']
-    n['project_id'] = os.environ['OS_TENANT_NAME']
-    return n
-
-def get_nova_credentials_v3():
-    n = {}
-    n['version'] = '3'
-    n['username'] = os.environ['OS_USERNAME']
-    n['password'] = os.environ['OS_PASSWORD']
-    n['project_id'] = os.environ['OS_TENANT_NAME']
-    n['auth_url'] = os.environ['OS_AUTH_URL']
-    return n
-
-def get_nova_credentials_v11():
-    n = {}
-    n['version'] = '1.1'
-    n['username'] = os.environ['OS_USERNAME']
-    n['api_key'] = os.environ['OS_PASSWORD']
-    n['auth_url'] = os.environ['OS_AUTH_URL']
-    n['project_id'] = os.environ['OS_TENANT_NAME']
-    return n
diff --git a/src/test/cordvtn/network-cfg.json b/src/test/cordvtn/network-cfg.json
deleted file mode 100644
index 09ca263..0000000
--- a/src/test/cordvtn/network-cfg.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
-	"apps" : {
-		"org.opencord.vtn" : {
-			"cordvtn" : {
-				"privateGatewayMac" : "00:00:00:00:00:01",
-				"publicGateways" : [
-				{
-					"gatewayIp" : "20.0.0.1",
-					"gatewayMac" : "fe:00:00:00:00:01"
-				}
-				],
-					"localManagementIp" : "172.27.0.1/24",
-					"ovsdbPort" : "6640",
-					"ssh" : {
-						"sshPort" : "22",
-						"sshUser" : "root",
-						"sshKeyFile" : "/root/node_key"
-					},
-					"openstack" : {
-						"endpoint" : "http://10.90.0.58:5000/v2.0/",
-						"tenant" : "admin",
-						"user" : "admin",
-						"password" : "ADMIN_PASS"
-					},
-					"xos" : {
-						"endpoint" : "http://10.90.0.58:80",
-						"user" : "padmin@vicci.org",
-						"password" : "letmein"
-					},
-					"nodes" : [
-					{
-						"hostname" : "compute-01",
-						"hostManagementIp" : "10.90.0.64/24",
-						"dataPlaneIp" : "192.168.199.1/24",
-						"dataPlaneIntf" : "veth1",
-						"bridgeId" : "of:0000000000000001"
-					},
-					{
-						"hostname" : "compute-02",
-						"hostManagementIp" : "10.90.0.65/24",
-						"dataPlaneIp" : "192.168.199.2/24",
-						"dataPlaneIntf" : "veth1",
-						"bridgeId" : "of:0000000000000002"
-					}
-				]
-			}
-		}
-	}
-}
diff --git a/src/test/cordvtn/nova_utils.py b/src/test/cordvtn/nova_utils.py
deleted file mode 100644
index 02526f2..0000000
--- a/src/test/cordvtn/nova_utils.py
+++ /dev/null
@@ -1,89 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from credentials import get_nova_credentials
-from novaclient.client import Client
-
-class novautils(object):
-
-    def __init__(self, net_id, server_id):
-        self.net_id = net_id#'ae0618cf-fa34-4e8b-816d-c1356c409119'
-        self.server_id = server_id#'99889c8d-113f-4a7e-970c-77f1916bfe14'
-
-    def get_nova_intance(self):
-        creds = get_nova_credentials()
-        nvclient= Client(**creds)
-        return nvclient
-
-    def create_instance_v2(self, vm_name):
-        nvclient = self.get_nova_intance()
-        image = nvclient.images.find(name="vsg-1.1")
-        flavor = nvclient.flavors.find(name="m1.tiny")
-        nic = [{'net-id': self.net_id}]
-        instance = nvclient.servers.create(name=self.vm_name, image=image,
-                                              flavor=flavor,
-                                              nics=nic)
-        time.sleep(5)
-        return instance
-
-    def get_flavors_list(self):
-        nvclient = self.get_nova_intance()
-        flavors_list = nvclient.flavors.list()
-        return flavors_list
-
-    def get_flavor_details(self):
-        nvclient = self.get_nova_intance()
-        flavors_list = nvclient.flavors.list()
-        for fl in  flavors_list:
-            return fl.name, fl.ram, fl.vcpus, fl.disk, fl.id
-
-    def get_servers_list(self):
-        nvclient = self.get_nova_intance()
-        servers = nvclient.servers.list()
-        return servers
-
-    def get_server_details(self):
-        nvclient = self.get_nova_intance()
-        servers = nvclient.servers.get(self.server_id)
-        for s in servers:
-            return s.id, s.name, s.image, s.flavor, s.user_id
-
-    def get_floating_ip_pools(self):
-        nvclient = self.get_nova_intance()
-        ip_list = nvclient.floating_ip_pools.list()
-        return ip_list
-
-    def get_host_list(self):
-        nvclient = self.get_nova_intance()
-        host_list = nvclient.hosts.list()
-        return host_list
-
-    def get_hypervisor_list(self):
-        nvclient = self.get_nova_intance()
-        hyper_list = nvclient.hypervisors.list()
-        return hyper_list
-
-    def get_images_list(self):
-        nvclient = self.get_nova_intance()
-        img_list = nvclient.images.list(detailed=True)
-        return img_list
-
-    def get_aggregates_list(self):
-        nvclient = self.get_nova_intance()
-        return nvclient.aggregates.list()
-
-
-
diff --git a/src/test/cordvtn/utils.py b/src/test/cordvtn/utils.py
deleted file mode 100644
index f2c07d0..0000000
--- a/src/test/cordvtn/utils.py
+++ /dev/null
@@ -1,420 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import sys
-import glanceclient
-
-from keystoneclient.v2_0 import client
-from keystoneclient import utils
-from novaclient import client as novaclient
-from keystoneclient import client as keystoneclient
-from neutronclient.neutron import client as neutronclient
-
-
-def keystone_client_version():
-    api_version = os.getenv('OS_IDENTITY_API_VERSION')
-    if api_version is not None:
-       log.info("Version is set in env as '%s'",
-                    api_version)
-       return api_version
-    return SET_API_VERSION
-
-
-def keystone_client(other_creds={}):
-    sess = session(other_creds)
-    return keystoneclient.Client(keystone_client_version(), session=sess)
-
-
-def nova_client_version():
-    api_version = os.getenv('OS_COMPUTE_API_VERSION')
-    if api_version is not None:
-        log.info("OS_COMPUTE_API_VERSION is set in env as '%s'",
-                    api_version)
-        return api_version
-    return SET_API_VERSION
-
-
-def nova_client(other_creds={}):
-    sess = session(other_creds)
-    return novaclient.Client(nova_client_version(), session=sess)
-
-
-def neutron_client_version():
-    api_version = os.getenv('OS_NETWORK_API_VERSION')
-    if api_version is not None:
-        log.info("OS_NETWORK_API_VERSION is set in env as '%s'",
-                    api_version)
-        return api_version
-    return SET_API_VERSION
-
-
-def neutron_client(other_creds={}):
-    sess = session(other_creds)
-    return neutronclient.Client(neutron_client_version(), session=sess)
-
-
-def glance_client_version():
-    api_version = os.getenv('OS_IMAGE_API_VERSION')
-    if api_version is not None:
-        log.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
-        return api_version
-    return SET_API_VERSION
-
-
-def glance_client(other_creds={}):
-    sess = session(other_creds)
-    return glanceclient.Client(glance_client_version(), session=sess)
-
-def network_list(neutron_client):
-    network_list = neutron_client.list_networks()['networks']
-    if len(network_list) == 0:
-        return None
-    else:
-        return network_list
-
-
-def router_list(neutron_client):
-    router_list = neutron_client.list_routers()['routers']
-    if len(router_list) == 0:
-        return None
-    else:
-        return router_list
-
-
-def port_list(neutron_client):
-    port_list = neutron_client.list_ports()['ports']
-    if len(port_list) == 0:
-        return None
-    else:
-        return port_list
-
-
-def network_id(neutron_client, network_name):
-    networks = neutron_client.list_networks()['networks']
-    id = ''
-    for n in networks:
-        if n['name'] == network_name:
-            id = n['id']
-            break
-    return id
-
-
-def subnet_id(neutron_client, subnet_name):
-    subnets = neutron_client.list_subnets()['subnets']
-    id = ''
-    for s in subnets:
-        if s['name'] == subnet_name:
-            id = s['id']
-            break
-    return id
-
-
-def router_id(neutron_client, router_name):
-    routers = neutron_client.list_routers()['routers']
-    id = ''
-    for r in routers:
-        if r['name'] == router_name:
-            id = r['id']
-            break
-    return id
-
-
-def private_net(neutron_client):
-    networks = neutron_client.list_networks()['networks']
-    if len(networks) == 0:
-        return None
-    for net in networks:
-        if (net['router:external'] is False) and (net['shared'] is True):
-            return net
-    return None
-
-
-def external_net(neutron_client):
-    for network in neutron_client.list_networks()['networks']:
-        if network['router:external']:
-            return network['name']
-    return None
-
-
-def external_net_id(neutron_client):
-    for network in neutron_client.list_networks()['networks']:
-        if network['router:external']:
-            return network['id']
-    return None
-
-
-def check_neutron_net(neutron_client, net_name):
-    for network in neutron_client.list_networks()['networks']:
-        if network['name'] == net_name:
-            for subnet in network['subnets']:
-                return True
-    return False
-
-
-def create_neutron_net(neutron_client, name):
-    json_body = {'network': {'name': name,
-                             'admin_state_up': True}}
-    try:
-        network = neutron_client.create_network(body=json_body)
-        net_sett = network['network']
-        return net_sett['id']
-    except Exception, e:
-        log.info("Error [create_neutron_net(neutron_client, '%s')]: %s"
-                     % (name, e))
-        return None
-
-
-def create_neutron_subnet(neutron_client, name, cidr, net_id):
-    json_body = {'subnets': [{'name': name, 'cidr': cidr,
-                              'ip_version': 4, 'network_id': net_id}]}
-    try:
-        subnet = neutron_client.create_subnet(body=json_body)
-        return subnet['subnets'][0]['id']
-    except Exception, e:
-        log.info("Error [create_neutron_subnet(neutron_client, '%s', "
-                     "'%s', '%s')]: %s" % (name, cidr, net_id, e))
-        return None
-
-
-def create_neutron_router(neutron_client, name):
-    json_body = {'router': {'name': name, 'admin_state_up': True}}
-    try:
-        router = neutron_client.create_router(json_body)
-        return router['router']['id']
-    except Exception, e:
-        log.info("Error [create_neutron_router(neutron_client, '%s')]: %s"
-                     % (name, e))
-        return None
-
-
-def create_neutron_port(neutron_client, name, network_id, ip):
-    json_body = {'port': {
-                 'admin_state_up': True,
-                 'name': name,
-                 'network_id': network_id,
-                 'fixed_ips': [{"ip_address": ip}]
-                 }}
-    try:
-        port = neutron_client.create_port(body=json_body)
-        return port['port']['id']
-    except Exception, e:
-        log.info("Error [create_neutron_port(neutron_client, '%s', '%s', "
-                     "'%s')]: %s" % (name, network_id, ip, e))
-        return None
-
-
-def update_neutron_net(neutron_client, network_id, shared=False):
-    json_body = {'network': {'shared': shared}}
-    try:
-        neutron_client.update_network(network_id, body=json_body)
-        return True
-    except Exception, e:
-        log.info("Error [update_neutron_net(neutron_client, '%s', '%s')]: "
-                     "%s" % (network_id, str(shared), e))
-        return False
-
-
-def update_neutron_port(neutron_client, port_id, device_owner):
-    json_body = {'port': {
-                 'device_owner': device_owner,
-                 }}
-    try:
-        port = neutron_client.update_port(port=port_id,
-                                          body=json_body)
-        return port['port']['id']
-    except Exception, e:
-        log.info("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
-                     " %s" % (port_id, device_owner, e))
-        return None
-
-
-def add_interface_router(neutron_client, router_id, subnet_id):
-    json_body = {"subnet_id": subnet_id}
-    try:
-        neutron_client.add_interface_router(router=router_id, body=json_body)
-        return True
-    except Exception, e:
-        log.info("Error [add_interface_router(neutron_client, '%s', "
-                     "'%s')]: %s" % (router_id, subnet_id, e))
-        return False
-
-
-def add_gateway_router(neutron_client, router_id):
-    ext_net_id = external_net_id(neutron_client)
-    router_dict = {'network_id': ext_net_id}
-    try:
-        neutron_client.add_gateway_router(router_id, router_dict)
-        return True
-    except Exception, e:
-        log.info("Error [add_gateway_router(neutron_client, '%s')]: %s"
-                     % (router_id, e))
-        return False
-
-
-def delete_neutron_net(neutron_client, network_id):
-    try:
-        neutron_client.delete_network(network_id)
-        return True
-    except Exception, e:
-        log.info("Error [delete_neutron_net(neutron_client, '%s')]: %s"
-                     % (network_id, e))
-        return False
-
-
-def delete_neutron_subnet(neutron_client, subnet_id):
-    try:
-        neutron_client.delete_subnet(subnet_id)
-        return True
-    except Exception, e:
-        log.info("Error [delete_neutron_subnet(neutron_client, '%s')]: %s"
-                     % (subnet_id, e))
-        return False
-
-
-def delete_neutron_router(neutron_client, router_id):
-    try:
-        neutron_client.delete_router(router=router_id)
-        return True
-    except Exception, e:
-        log.info("Error [delete_neutron_router(neutron_client, '%s')]: %s"
-                     % (router_id, e))
-        return False
-
-
-def delete_neutron_port(neutron_client, port_id):
-    try:
-        neutron_client.delete_port(port_id)
-        return True
-    except Exception, e:
-        log.info("Error [delete_neutron_port(neutron_client, '%s')]: %s"
-                     % (port_id, e))
-        return False
-
-
-def remove_interface_router(neutron_client, router_id, subnet_id):
-    json_body = {"subnet_id": subnet_id}
-    try:
-        neutron_client.remove_interface_router(router=router_id,
-                                               body=json_body)
-        return True
-    except Exception, e:
-        log.info("Error [remove_interface_router(neutron_client, '%s', "
-                     "'%s')]: %s" % (router_id, subnet_id, e))
-        return False
-
-
-def remove_gateway_router(neutron_client, router_id):
-    try:
-        neutron_client.remove_gateway_router(router_id)
-        return True
-    except Exception, e:
-        log.info("Error [remove_gateway_router(neutron_client, '%s')]: %s"
-                     % (router_id, e))
-        return False
-
-
-def create_network_full(neutron_client,
-                        net_name,
-                        subnet_name,
-                        router_name,
-                        cidr):
-
-    # Check if the network already exists
-    network_id = network_id(neutron_client, net_name)
-    subnet_id = subnet_id(neutron_client, subnet_name)
-    router_id = router_id(neutron_client, router_name)
-
-    if network_id != '' and subnet_id != '' and router_id != '':
-        log.info("A network with name '%s' already exists..." % net_name)
-    else:
-        neutron_client.format = 'json'
-        log.info('Creating neutron network %s...' % net_name)
-        network_id = create_neutron_net(neutron_client, net_name)
-
-        if not network_id:
-            return False
-
-        log.info("Network '%s' created successfully" % network_id)
-        log.info('Creating Subnet....')
-        subnet_id = create_neutron_subnet(neutron_client, subnet_name,
-                                          cidr, network_id)
-        if not subnet_id:
-            return None
-
-        log.info("Subnet '%s' created successfully" % subnet_id)
-        log.info('Creating Router...')
-        router_id = create_neutron_router(neutron_client, router_name)
-
-        if not router_id:
-            return None
-
-        log.info("Router '%s' created successfully" % router_id)
-        log.info('Adding router to subnet...')
-
-        if not add_interface_router(neutron_client, router_id, subnet_id):
-            return None
-
-        log.info("Interface added successfully.")
-
-        log.info('Adding gateway to router...')
-        if not add_gateway_router(neutron_client, router_id):
-            return None
-
-        log.info("Gateway added successfully.")
-
-    net_set = {'net_id': network_id,
-                   'subnet_id': subnet_id,
-                   'router_id': router_id}
-    return net_set
-
-
-def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
-    neutron_client = neutron_client()
-
-    net_set = create_network_full(neutron_client,
-                                      net_name,
-                                      subnt_name,
-                                      router_name,
-                                      subnet_cidr)
-    if net_set:
-        if not update_neutron_net(neutron_client,
-                                  net_set['net_id'],
-                                  shared=True):
-            log.info("Failed to update network %s..." % net_name)
-            return None
-        else:
-            log.info("Network '%s' is available..." % net_name)
-    else:
-        log.info("Network %s creation failed" % net_name)
-        return None
-    return net_set
-
diff --git a/src/test/cordvtn/vtn-cfg-1.json b/src/test/cordvtn/vtn-cfg-1.json
deleted file mode 100644
index 56b767c..0000000
--- a/src/test/cordvtn/vtn-cfg-1.json
+++ /dev/null
@@ -1,48 +0,0 @@
-{
- "apps" : {
-   "org.opencord.vtn" : {
-     "cordvtn" : {
-       "controllers" : [ "10.1.0.1:6654" ],
-       "localManagementIp" : "172.27.0.1/24",
-       "nodes" : [ {
-         "bridgeId" : "of:0000525400201852",
-         "dataPlaneIntf" : "fabric",
-         "dataPlaneIp" : "10.6.1.2/24",
-         "hostManagementIp" : "10.1.0.14/24",
-         "hostname" : "cold-flag"
-       } ],
-       "openstack" : {
-         "endpoint" : "https://keystone.cord.lab:5000/v2.0",
-         "password" : "VeryLongKeystoneAdminPassword",
-         "tenant" : "admin",
-         "user" : "admin"
-       },
-       "ovsdbPort" : "6641",
-       "privateGatewayMac" : "00:00:00:00:00:01",
-       "publicGateways" : [ {
-         "gatewayIp" : "10.6.1.193",
-         "gatewayMac" : "02:42:0a:06:01:01"
-       }, {
-         "gatewayIp" : "10.6.1.129",
-         "gatewayMac" : "02:42:0a:06:01:01"
-       } ],
-       "ssh" : {
-         "sshKeyFile" : "/root/node_key",
-         "sshPort" : "22",
-         "sshUser" : "root"
-       },
-       "xos" : {
-         "endpoint" : "http://xos:8888/",
-         "password" : "letmein",
-         "user" : "padmin@vicci.org"
-       }
-     }
-   }
- },
- "devices" : { },
- "ports" : { },
- "layouts" : { },
- "regions" : { },
- "links" : { },
- "hosts" : { }
-}
diff --git a/src/test/cordvtn/vtn-setup.sh b/src/test/cordvtn/vtn-setup.sh
deleted file mode 100644
index e89e16d..0000000
--- a/src/test/cordvtn/vtn-setup.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-
-sudo brctl addbr fabric
-sudo ip link set fabric up
-sudo ip link add veth0 type veth peer name veth1
-sudo ip link set veth0 up
-sudo ip link set veth1 up
-sudo brctl addif fabric veth0
-sudo brctl addif fabric eth1
-sudo ip addr flush eth1
-sudo ip link set address 00:00:00:00:00:01 dev fabric
-sudo ip link set address 00:00:00:00:00:01 dev eth1
-sudo ip address add 20.0.0.1/24 dev fabric
-sudo ip address add 10.168.0.1/24 dev fabric
-sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
diff --git a/src/test/dhcp/__init__.py b/src/test/dhcp/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/dhcp/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/dhcp/dhcpTest.json b/src/test/dhcp/dhcpTest.json
deleted file mode 100644
index b9d29b4..0000000
--- a/src/test/dhcp/dhcpTest.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "STARTIP":"10.10.10.70", 
-  "ENDIP":"10.10.10.71",
-  "IP":"10.10.10.2", 
-  "MAC": "ca:fe:ca:fe:ca:fe",
-  "SUBNET": "255.255.255.0", 
-  "BROADCAST":"10.10.10.255", 
-  "ROUTER":"10.10.10.1"
-}
-
diff --git a/src/test/dhcp/dhcpTest.py b/src/test/dhcp/dhcpTest.py
deleted file mode 100644
index 4b4a8c3..0000000
--- a/src/test/dhcp/dhcpTest.py
+++ /dev/null
@@ -1,716 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-import time
-import copy
-from DHCP import DHCPTest
-from OltConfig import *
-from OnosCtrl import OnosCtrl
-from portmaps import g_subscriber_port_map
-from CordLogger import CordLogger
-from CordTestConfig import setup_module, teardown_module
-from CordTestUtils import log_test
-log_test.setLevel('INFO')
-
-class dhcp_exchange(CordLogger):
-
-    dhcp_server_config = {
-        "ip": "10.1.11.50",
-        "mac": "ca:fe:ca:fe:ca:fe",
-        "subnet": "255.255.252.0",
-        "broadcast": "10.1.11.255",
-        "router": "10.1.8.1",
-        "domain": "8.8.8.8",
-        "ttl": "63",
-        "delay": "2",
-        "startip": "10.1.11.51",
-        "endip": "10.1.11.100"
-    }
-
-    STARTIP = "10.10.10.40"
-    ENDIP = "10.10.10.41"
-    IP = "10.10.10.2"
-    MAC = "ca:fe:ca:fe:ca:fe"
-    SUBNET = "255.255.255.0"
-    BROADCAST = "10.10.10.255"
-    ROUTER = "10.10.10.1"
-
-    app = 'org.onosproject.dhcp'
-
-    ip_count = 0
-    failure_count = 0
-    start_time = 0
-    diff = 0
-
-    transaction_count = 0
-    transactions = 0
-    running_time = 0
-    total_success = 0
-    total_failure = 0
-
-    @classmethod
-    def setUpClass(cls):
-        cls.config_dhcp = {'startip': cls.STARTIP, 'endip': cls.ENDIP,
-                           'ip':cls.IP, 'mac': cls.MAC, 'subnet': cls.SUBNET,
-                           'broadcast':cls.BROADCAST, 'router':cls.ROUTER}
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        cls.iface = cls.port_map[1]
-
-    def setUp(self):
-        ''' Activate the dhcp app'''
-        super(dhcp_exchange, self).setUp()
-        self.maxDiff = None ##for assert_equal compare outputs on failure
-        self.onos_ctrl = OnosCtrl(self.app)
-        status, _ = self.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(3)
-
-    def tearDown(self):
-        '''Deactivate the dhcp app'''
-        self.onos_ctrl.deactivate()
-        super(dhcp_exchange, self).tearDown()
-
-    def onos_load_config(self, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    def onos_dhcp_table_load(self, config = None):
-          dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
-          dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
-          if config:
-              for k in config.keys():
-                  if dhcp_config.has_key(k):
-                      dhcp_config[k] = config[k]
-          self.onos_load_config(dhcp_dict)
-
-    def send_recv(self, mac = None, update_seed = False, validate = True):
-        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
-        log_test.info("discover cip %s"%(cip))
-        log_test.info("discover sip %s"%(sip))
-        if validate:
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-            log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                     (cip, sip, self.dhcp.get_mac(cip)[0]))
-        return cip,sip
-
-    def stats(self,success_rate = False, only_discover = False):
-
-	self.ip_count = 0
-	self.failure_count = 0
-	self.start_time = 0
-	self.diff = 0
-	self.transaction_count = 0
-	config = {'startip':'182.17.0.3', 'endip':'182.17.0.180',
-                  'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = self.iface)
-	self.start_time = time.time()
-
-	while self.diff <= 60:
-	    if only_discover:
-		cip, sip, mac, _ = self.dhcp.only_discover(multiple = True)
-                log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                          (cip, sip, mac))
-            else:
-                cip, sip = self.send_recv(update_seed = True, validate = False)
-
-	    if cip:
-	    	self.ip_count +=1
-	    elif cip == None:
-		self.failure_count += 1
-                log_test.info('Failed to get ip')
-		if success_rate and self.ip_count > 0:
-		   break
-	    self.diff = round(time.time() - self.start_time, 0)
-
-
-	self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-
-    	self.transactions += (self.ip_count+self.failure_count)
-	self.running_time += self.diff
-        self.total_success += self.ip_count
-	self.total_failure += self.failure_count
-
-    def test_dhcp_1request(self):
-        self.onos_dhcp_table_load(self.config_dhcp)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-        self.send_recv()
-
-    def test_dhcp_1request_with_invalid_source_mac_broadcast(self):
-        config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
-	assert_equal(cip,None)
-	log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-
-    def test_dhcp_1request_with_invalid_source_mac_multicast(self):
-        config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:91:02:e4')
-        assert_equal(cip,None)
-        log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-
-    def test_dhcp_1request_with_invalid_source_mac_zero(self):
-        config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
-        assert_equal(cip,None)
-        log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-
-    def test_dhcp_Nrequest(self, requests=10):
-        config = {'startip':'192.168.1.20', 'endip':'192.168.1.69',
-                  'ip':'192.168.1.2', 'mac': "ca:fe:ca:fe:cc:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'192.168.1.255', 'router': '192.168.1.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '192.168.1.1', iface = self.iface)
-        ip_map = {}
-        for i in range(requests):
-            cip, sip = self.send_recv(update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-
-    def test_dhcp_1release(self):
-        config = {'startip':'10.10.100.20', 'endip':'10.10.100.230',
-                  'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = self.iface)
-        cip, sip = self.send_recv()
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcp.release(cip), True)
-        log_test.info('Triggering DHCP discover again after release')
-        cip2, sip2 = self.send_recv(update_seed = True)
-        log_test.info('Verifying released IP was given back on rediscover')
-        assert_equal(cip, cip2)
-        log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-        assert_equal(self.dhcp.release(cip2), True)
-
-    def test_dhcp_Nrelease(self):
-        config = {'startip':'192.170.1.20', 'endip':'192.170.1.230',
-                  'ip':'192.170.1.2', 'mac': "ca:fe:ca:fe:9a:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'192.170.1.255', 'router': '192.170.1.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = self.iface)
-        ip_map = {}
-        for i in range(10):
-            cip, sip = self.send_recv(update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-
-        for ip in ip_map.keys():
-            log_test.info('Releasing IP %s' %ip)
-            assert_equal(self.dhcp.release(ip), True)
-
-        ip_map2 = {}
-        log_test.info('Triggering DHCP discover again after release')
-        for i in range(len(ip_map.keys())):
-            cip, sip = self.send_recv(update_seed = True)
-            ip_map2[cip] = sip
-
-        log_test.info('Verifying released IPs were given back on rediscover')
-        if ip_map != ip_map2:
-            log_test.info('Map before release %s' %ip_map)
-            log_test.info('Map after release %s' %ip_map2)
-        assert_equal(ip_map, ip_map2)
-
-
-    def test_dhcp_starvation_positive_scenario(self):
-        config = {'startip':'193.170.1.20', 'endip':'193.170.1.69',
-                  'ip':'193.170.1.2', 'mac': "ca:fe:c2:fe:cc:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'192.168.1.255', 'router': '192.168.1.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '192.169.1.1', iface = self.iface)
-        ip_map = {}
-        for i in range(10):
-            cip, sip = self.send_recv(update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-
-
-    def test_dhcp_starvation_negative_scenario(self):
-        config = {'startip':'182.17.0.20', 'endip':'182.17.0.69',
-                  'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = self.iface)
-        log_test.info('Verifying passitive case')
-        for x in xrange(50):
-            mac = RandMAC()._fix()
-            self.send_recv(mac = mac)
-        log_test.info('Verifying negative case')
-        cip, sip = self.send_recv(update_seed = True, validate = False)
-        assert_equal(cip, None)
-        assert_equal(sip, None)
-
-
-    def test_dhcp_same_client_multiple_discover(self):
-	config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                 'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	log_test.info('Triggering DHCP discover again.')
-	new_cip, new_sip, new_mac, _ = self.dhcp.only_discover()
-	assert_equal(new_cip, cip)
-	log_test.info('client got same IP as expected when sent 2nd discovery')
-
-
-    def test_dhcp_same_client_multiple_request(self):
-	config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                 'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = self.iface)
-	log_test.info('Sending DHCP discover and DHCP request.')
-	cip, sip = self.send_recv()
-	mac = self.dhcp.get_mac(cip)[0]
-	log_test.info("Sending DHCP request again.")
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip,cip)
-	log_test.info('server offered same IP to clain for multiple requests, as expected')
-
-    def test_dhcp_client_desired_address(self):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.50', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-	assert_not_equal(cip, None)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac))
-	assert_equal(cip,self.dhcp.seed_ip)
-	log_test.info('ONOS dhcp server offered client requested IP %s as expected'%self.dhcp.seed_ip)
-
-    #test failing, server not returns NAK when requested out of pool IP
-    def test_dhcp_client_desired_address_out_of_pool(self):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.75', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-	assert_not_equal(cip, None)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,self.dhcp.seed_ip)
-	log_test.info('server offered IP from its pool of IPs when requested out of pool IP, as expected')
-
-
-    def test_dhcp_server_nak_packet(self):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-        assert_equal(new_cip, None)  #Negative Test Case
-	log_test.info('dhcp servers sent NAK as expected when requested different IP from  same client')
-
-
-    #test_dhcp_lease_packet
-    def test_dhcp_client_requests_specific_lease_time_in_discover(self,lease_time = 700):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	self.dhcp.return_option = 'lease'
-	log_test.info('Sending DHCP discover with lease time of 700')
-	cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True, lease_value = lease_time)
-        assert_equal(lval, 700)
-	log_test.info('dhcp server offered IP address with client requested lease  time')
-
-    def test_dhcp_client_request_after_reboot(self):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	log_test.info('verifying client IP after reboot')
-	os.system('ifconfig '+self.iface+' down')
-	time.sleep(5)
-	os.system('ifconfig '+self.iface+' up')
-	new_cip, new_sip = self.dhcp.only_request(cip, mac, cl_reboot = True)
-	assert_equal(new_cip,cip)
-	log_test.info('client got same ip after reboot, as expected')
-
-
-    def test_dhcp_server_after_reboot(self):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	self.onos_ctrl.deactivate()
-	new_cip1, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip1,None)
-	status, _ = self.onos_ctrl.activate()
-        assert_equal(status, True)
-	time.sleep(3)
-	new_cip2, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip2,cip)
-	log_test.info('client got same ip after server reboot, as expected')
-
-    def test_dhcp_specific_lease_time_only_in_discover_but_not_in_request_packet(self,lease_time=700):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	log_test.info('Sending DHCP discover with lease time of 700')
-	cip, sip, mac, _ = self.dhcp.only_discover(lease_time = True,lease_value=lease_time)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True)
-	assert_equal(new_cip,cip)
-	assert_not_equal(lval, lease_time) #Negative Test Case
-	log_test.info('client requested lease time only in discover but not in request, not seen in server ACK packet as expected')
-
-
-    def test_dhcp_specific_lease_time_only_in_request_but_not_in_discover_packet(self,lease_time=800):
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True, lease_value=lease_time)
-	assert_equal(lval, lease_time)
-	log_test.info('client requested lease time in request packet, seen in server ACK packet as expected')
-
-    def test_dhcp_client_renew_time(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-	log_test.info('waiting renew  time %d seconds to send next request packet'%lval)
-	time.sleep(lval)
-	latest_cip, latest_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-	assert_equal(latest_cip,cip)
-	log_test.info('client got same IP after renew time, as expected')
-
-    def test_dhcp_client_rebind_time(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-	log_test.info('waiting rebind time %d seconds to send next request packet'%lval)
-	time.sleep(lval)
-	latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-	assert_equal(latest_cip,cip)
-	log_test.info('client got same IP after rebind time, as expected')
-
-    def test_dhcp_client_expected_subnet_mask(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	expected_subnet = '255.255.255.0'
-	self.dhcp.return_option = 'subnet'
-	cip, sip, mac, subnet_mask = self.dhcp.only_discover()
-	assert_equal(subnet_mask, expected_subnet)
-	assert_not_equal(cip, None)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	log_test.info('seen expected subnet mask %s in dhcp offer packet'%subnet_mask)
-
-    def test_dhcp_client_sends_dhcp_request_with_wrong_subnet_mask(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	self.dhcp.send_different_option = 'subnet'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Subnet Mask in DHCP Request.")
-
-
-    def test_dhcp_client_expected_router_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	expected_router_address = '20.20.20.1'
-	self.dhcp.return_option = 'router'
-
-	cip, sip, mac, router_address_ip = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	assert_equal(expected_router_address, router_address_ip)
-	log_test.info('seen expected rouer address %s ip in dhcp offer packet'%router_address_ip)
-
-    def test_dhcp_client_sends_dhcp_request_with_wrong_router_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	self.dhcp.send_different_option = 'router'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Router Address in DHCP Request.")
-
-
-    def test_dhcp_client_expected_broadcast_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	expected_broadcast_address = '20.20.20.255'
-	self.dhcp.return_option = 'broadcast_address'
-
-	cip, sip, mac, broadcast_address = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	assert_equal(expected_broadcast_address, broadcast_address)
-	log_test.info('seen expected broadcast address %s in dhcp offer packet'%broadcast_address)
-
-    def test_dhcp_client_sends_dhcp_request_with_wrong_broadcast_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	self.dhcp.send_different_option = 'broadcast_address'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Broadcast Address in DHCP Request.")
-
-    def test_dhcp_client_expected_dns_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1', 'domain':'8.8.8.8'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-	expected_dns_address = '8.8.8.8'
-	self.dhcp.return_option = 'dns'
-
-	cip, sip, mac, dns_address = self.dhcp.only_discover()
-	assert_not_equal(cip, None)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_dns_address, dns_address)
-	log_test.info('seen expected DNS ip address %s in dhcp offer packet'%dns_address)
-
-    def test_dhcp_client_sends_request_with_wrong_dns_address(self):
-
-	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1', 'domain':'8.8.8.8'}
-        self.onos_dhcp_table_load(config)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = self.iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	assert_not_equal(cip, None)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	self.dhcp.send_different_option = 'dns'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong DNS Address in DHCP Request.")
-
-    def test_dhcp_server_transactions_per_second(self):
-
-	for i in range(1,4):
-		self.stats()
-		log_test.info("Stats for run %d",i)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	        log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
-
-	log_test.info("Final Statistics for total transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
-
-    def test_dhcp_server_consecutive_successes_per_second(self):
-
-	for i in range(1,4):
-		self.stats(success_rate = True)
-		log_test.info("Stats for run %d",i)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of consecutive successful transactions          Running Time ")
-	        log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
-		log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-
-    def test_dhcp_server_client_transactions_per_second(self):
-
-        for i in range(1,4):
-		self.stats(only_discover = True)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("Stats for run %d of sending only DHCP Discover",i)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	        log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of clients per second in run %d:%f                                      "
-			%(i, self.transaction_count))
-		log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of clients per second: %d                                        ",
-		round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcp_server_consecutive_successful_clients_per_second(self):
-
-        for i in range(1,4):
-		self.stats(success_rate = True, only_discover = True)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("Stats for run %d for sending only DHCP Discover",i)
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of consecutive successful transactions          Running Time ")
-	        log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-		log_test.info("----------------------------------------------------------------------------------")
-		log_test.info("No. of consecutive successful clients per second in run %d:%f" %(i, self.transaction_count))
-		log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful clients per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
diff --git a/src/test/dhcpl2relay/__init__.py b/src/test/dhcpl2relay/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/dhcpl2relay/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/dhcpl2relay/dhcpl2relayTest.json b/src/test/dhcpl2relay/dhcpl2relayTest.json
deleted file mode 100644
index 1c72d60..0000000
--- a/src/test/dhcpl2relay/dhcpl2relayTest.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "TAGGED_TRAFFIC" : false,
-    "VOLTHA_OLT_MAC": "00:0c:e2:31:12:00",
-    "VOLTHA_HOST": "172.17.0.1",
-    "VOLTHA_OLT_TYPE": "ponsim_olt",
-    "VOLTHA_REST_PORT": 8882,
-    "VOLTHA_TEARDOWN": false
-}
diff --git a/src/test/dhcpl2relay/dhcpl2relayTest.py b/src/test/dhcpl2relay/dhcpl2relayTest.py
deleted file mode 100644
index 8de431c..0000000
--- a/src/test/dhcpl2relay/dhcpl2relayTest.py
+++ /dev/null
@@ -1,2400 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF AeY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-import time
-import os, sys, re, json
-from DHCP import DHCPTest
-from CordTestUtils import get_mac, log_test, getstatusoutput, get_controller
-from SSHTestAgent import SSHTestAgent
-from OnosCtrl import OnosCtrl
-from onosclidriver import OnosCliDriver
-from OltConfig import OltConfig
-from CordTestServer import cord_test_onos_restart, cord_test_ovs_flow_add,cord_test_onos_shutdown
-from CordTestConfig import setup_module, teardown_module
-from CordLogger import CordLogger
-from portmaps import g_subscriber_port_map
-from CordContainer import Onos
-from VolthaCtrl import VolthaCtrl
-import threading, random
-from threading import current_thread
-import requests
-log_test.setLevel('INFO')
-
-class dhcpl2relay_exchange(CordLogger):
-
-    VOLTHA_HOST = None
-    VOLTHA_REST_PORT = VolthaCtrl.REST_PORT
-    VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-    VOLTHA_OLT_TYPE = 'simulated_olt'
-    VOLTHA_OLT_MAC = '00:0c:e2:31:12:00'
-    VOLTHA_UPLINK_VLAN_MAP = { 'of:0000000000000001' : '222' }
-    TAGGED_TRAFFIC = False
-    app = 'org.opencord.dhcpl2relay'
-    sadis_app = 'org.opencord.sadis'
-    app_dhcp = 'org.onosproject.dhcp'
-    app_olt = 'org.onosproject.olt'
-    relay_interfaces = ()
-    relay_interfaces_last = ()
-    interface_to_mac_map = {}
-    relay_vlan_map = {}
-    host_ip_map = {}
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    dhcp_data_dir = os.path.join(test_path, '..', 'setup')
-    dhcpl2_app_file = os.path.join(test_path, '..', 'apps/dhcpl2relay-1.0.0.oar')
-    olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-3.0-SNAPSHOT.oar')
-    sadis_app_file = os.path.join(test_path, '..', 'apps/sadis-app-1.0.0-SNAPSHOT.oar')
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config_voltha_local.json'))
-    default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
-    default_options = [ ('subnet-mask', '255.255.255.0'),
-                     ('broadcast-address', '192.168.1.255'),
-                     ('domain-name-servers', '192.168.1.1'),
-                     ('domain-name', '"mydomain.cord-tester"'),
-                   ]
-    default_subnet_config = [ ('192.168.1.2',
-'''
-subnet 192.168.1.0 netmask 255.255.255.0 {
-    range 192.168.1.10 192.168.1.100;
-}
-'''), ]
-
-    lock = threading.Condition()
-    ip_count = 0
-    failure_count = 0
-    start_time = 0
-    diff = 0
-
-    transaction_count = 0
-    transactions = 0
-    running_time = 0
-    total_success = 0
-    total_failure = 0
-    #just in case we want to reset ONOS to default network cfg after relay tests
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-    configs = {}
-    sadis_configs = {}
-    default_onos_netcfg = {}
-    voltha_switch_map = None
-    remote_dhcpd_cmd = []
-    ONOS_INSTANCES = 3
-    relay_device_id = None
-
-    @classmethod
-    def update_apps_version(cls):
-        version = Onos.getVersion()
-        major = int(version.split('.')[0])
-        minor = int(version.split('.')[1])
-        dhcpl2_app_version = '1.0.0'
-        sadis_app_version = '3.0-SNAPSHOT'
-        cls.dhcpl2_app_file = os.path.join(cls.test_path, '..', 'apps/dhcpl2relay-{}.oar'.format(dhcpl2_app_version))
-        cls.sadis_app_file = os.path.join(cls.test_path, '..', 'apps/sadis-app-{}.oar'.format(sadis_app_version))
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the cord dhcpl2relay app'''
-        cls.update_apps_version()
-        OnosCtrl(cls.app_dhcp).deactivate()
-        time.sleep(3)
-        cls.onos_ctrl = OnosCtrl(cls.app)
-        status, _ = cls.onos_ctrl.activate()
-        #assert_equal(status, True)
-        time.sleep(3)
-        status, _ = OnosCtrl(cls.sadis_app).activate()
-        #assert_equal(status, True)
-        time.sleep(3)
-        cls.setup_dhcpd()
-        cls.default_onos_netcfg = OnosCtrl.get_config()
-
-
-    def setUp(self):
-        super(dhcpl2relay_exchange, self).setUp()
-        #self.dhcp_l2_relay_setup()
-        #self.cord_sadis_load()
-        #self.cord_l2_relay_load()
-
-    def tearDown(self):
-        super(dhcpl2relay_exchange, self).tearDown()
-        #OnosCtrl.uninstall_app(self.dhcpl2_app_file)
-        #OnosCtrl.uninstall_app(self.sadis_app_file)
-        #OnosCtrl.uninstall_app(self.olt_app_file)
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the cord dhcpl2relay app'''
-        cls.onos_load_config(cls.default_onos_netcfg)
-        #cls.onos_ctrl.deactivate()
-        #OnosCtrl(cls.sadis_app).deactivate()
-        #OnosCtrl(cls.app_olt).deactivate()
-
-    @classmethod
-    def setup_dhcpd(cls, boot_delay = 5):
-        device_details = OnosCtrl.get_devices(mfr = 'Nicira')
-           ## Assuming only one OVS is detected on ONOS and its for external DHCP server connect point...
-        if device_details is not None:
-           did_ovs = device_details[0]['id']
-        else:
-           log_test.info('On this DHCPl2relay setup, onos does not have ovs device where external DHCP server is have connect point, so return with false status')
-           return False
-        cls.relay_device_id = did_ovs
-        device_details = OnosCtrl.get_devices()
-        if device_details is not None:
-           for device in device_details:
-               if device['available'] is True and device['driver'] == 'voltha':
-                  cls.olt_serial_id = "{}".format(device['serial'])
-                  break
-               else:
-                  cls.olt_serial_id = " "
-        else:
-            log_test.info('On this DHCPl2relay setup, onos does not have ovs device where external DHCP server is have connect point, so return with false status')
-            return False
-        if cls.service_running("/usr/sbin/dhcpd"):
-            print('DHCPD already running in container')
-            return True
-        setup_for_relay = cls.dhcp_l2_relay_setup()
-        cls.cord_l2_relay_load()
-        cls.voltha_setup()
-        return True
-
-        # dhcp_start_status = cls.dhcpd_start()
-        # if setup_for_relay and dhcp_start_status:
-        #     return True
-        # return False
-
-    @classmethod
-    def config_olt(cls, switch_map):
-        controller = get_controller()
-        auth = ('karaf', 'karaf')
-        #configure subscriber for every port on all the voltha devices
-        for device, device_map in switch_map.iteritems():
-            uni_ports = device_map['ports']
-            uplink_vlan = device_map['uplink_vlan']
-            for port in uni_ports:
-                vlan = port
-                rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}/{}'.format(controller,
-                                                                            device,
-                                                                            port,
-                                                                            vlan)
-                requests.post(rest_url, auth = auth)
-
-    @classmethod
-    def voltha_setup(cls):
-        s_tag_map = {}
-        #configure olt app to provision dhcp flows
-        cls.config_olt(cls.voltha_switch_map)
-        for switch, switch_map in cls.voltha_switch_map.iteritems():
-            s_tag_map[int(switch_map['uplink_vlan'])] = map(lambda p: int(p), switch_map['ports'])
-
-        cmd_list = []
-        relay_interface = cls.relay_interfaces[0]
-        cls.relay_vlan_map[relay_interface] = []
-        for s_tag, ports in s_tag_map.iteritems():
-            vlan_stag_intf = '{}.{}'.format(relay_interface, s_tag)
-            cmd = 'ip link add link %s name %s type vlan id %d' %(relay_interface, vlan_stag_intf, s_tag)
-            cmd_list.append(cmd)
-            cmd = 'ip link set %s up' %(vlan_stag_intf)
-            cmd_list.append(cmd)
-            for port in ports:
-                vlan_ctag_intf = '{}.{}.{}'.format(relay_interface, s_tag, port)
-                cmd = 'ip link add link %s name %s type vlan id %d' %(vlan_stag_intf, vlan_ctag_intf, port)
-                cmd_list.append(cmd)
-                cmd = 'ip link set %s up' %(vlan_ctag_intf)
-                cmd_list.append(cmd)
-                cls.relay_vlan_map[relay_interface].append(vlan_ctag_intf)
-            cls.relay_vlan_map[relay_interface].append(vlan_stag_intf)
-
-        for cmd in cmd_list:
-            log_test.info('Running command: %s' %cmd)
-            os.system(cmd)
-
-        cord_test_ovs_flow_add(cls.relay_interface_port)
-        for s_tag in s_tag_map.keys():
-            log_test.info('Configuring OVS flow for port %d, s_tag %d' %(cls.relay_interface_port, s_tag))
-            cord_test_ovs_flow_add(cls.relay_interface_port, s_tag)
-
-    @classmethod
-    def service_running(cls, pattern):
-        st, output = getstatusoutput('pgrep -f "{}"'.format(pattern))
-        return True if st == 0 else False
-
-    @classmethod
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    @classmethod
-    def dhcpd_start(cls, intf_list = None,
-                    config = default_config, options = default_options,
-                    subnet = default_subnet_config):
-        '''Start the dhcpd server by generating the conf file'''
-        if intf_list is None:
-            intf_list = cls.relay_interfaces
-        intf_list = list(intf_list)
-        ##stop dhcpd if already running
-        #cls.dhcpd_stop()
-        dhcp_conf = cls.dhcpd_conf_generate(config = config, options = options,
-                                            subnet = subnet)
-        ##first touch dhcpd.leases if it doesn't exist
-        lease_file = '{}/dhcpd.leases'.format(cls.dhcp_data_dir)
-        if os.access(lease_file, os.F_OK) is False:
-            with open(lease_file, 'w') as fd: pass
-
-        lease_file_tagged = '{}/dhcpd-tagged.leases'.format(cls.dhcp_data_dir)
-        if os.access(lease_file_tagged, os.F_OK) is False:
-            with open(lease_file_tagged, 'w') as fd: pass
-
-        conf_file = '{}/dhcpd.conf'.format(cls.dhcp_data_dir)
-        with open(conf_file, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        conf_file_tagged = '{}/dhcpd-tagged.conf'.format(cls.dhcp_data_dir)
-        with open(conf_file_tagged, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        #now configure the dhcpd interfaces for various subnets
-        index = 0
-        intf_info = []
-        vlan_intf_list = []
-        for ip,_ in subnet:
-            vlan_intf = None
-            intf = intf_list[index]
-            if intf in cls.relay_vlan_map:
-                vlan_intf = cls.relay_vlan_map[intf][0]
-                vlan_intf_list.append(vlan_intf)
-            mac = cls.get_mac(intf)
-            intf_info.append((ip, mac))
-            index += 1
-            cmd = 'ifconfig {} {}'.format(intf, ip)
-            status = os.system(cmd)
-            if vlan_intf:
-                cmd = 'ifconfig {} {}'.format(vlan_intf, ip)
-                os.system(cmd)
-
-        intf_str = ','.join(intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format('/root/test/src/test/setup/dhcpd.conf','/root/test/src/test/setup/dhcpd.leases', intf_str)
-        print('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        status = os.system(dhcpd_cmd)
-        vlan_intf_str = ','.join(vlan_intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format('/root/test/src/test/setup/dhcpd-tagged.conf','/root/test/src/test/setup/dhcpd-tagged.leases', vlan_intf_str)
-        print('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        status = os.system(dhcpd_cmd)
-        if status > 255:
-           status = 1
-        else:
-           return False
-        time.sleep(3)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        cls.relay_interfaces = intf_list
-        return True
-
-    @classmethod
-    def get_dhcpd_process(cls):
-        docker_cmd = 'docker exec cord-tester1'
-        cmd = '{} ps -eaf | grep dhcpd'.format(docker_cmd)
-        dhcpd_server_ip = get_controller()
-        server_user = 'ubuntu'
-        server_pass = 'ubuntu'
-        ssh_agent = SSHTestAgent(host = dhcpd_server_ip, user = server_user, password = server_user)
-        status, output = ssh_agent.run_cmd(cmd)
-        assert_equal(status, True)
-        if output:
-           cls.remote_dhcpd_cmd = re.findall('(?<=/)\w+.*', output)
-        log_test.info('DHCP server running on remote host and list of service commands are \n %s'%cls.remote_dhcpd_cmd)
-        assert_equal(status, True)
-        return cls.remote_dhcpd_cmd
-
-    def dhcpd_stop(self, remote_controller = False, dhcpd = None):
-        if remote_controller is not True:
-           if cls.service_running("/usr/sbin/dhcpd"):
-              cmd = 'pkill -9 dhcpd'
-              st, _ = getstatusoutput(cmd)
-              return True if st == 0 else False
-        else:
-           docker_cmd = 'docker exec cord-tester1'
-           dhcpd_server_ip = get_controller()
-           server_user = 'ubuntu'
-           server_pass = 'ubuntu'
-           service_satatus = True
-           ssh_agent = SSHTestAgent(host = dhcpd_server_ip, user = server_user, password = server_user)
-           if dhcpd == 'stop':
-              status, output = ssh_agent.run_cmd('{} pkill -9 dhcpd'.format(docker_cmd))
-              service_satatus = status and True
-           elif dhcpd == 'start':
-              for cmd in self.remote_dhcpd_cmd:
-                 dhcpd_cmd = ' {0} /{1}'.format(docker_cmd,cmd)
-                 status, output = ssh_agent.run_cmd(dhcpd_cmd)
-                 service_satatus = status and True
-           elif dhcpd == 'restart':
-              status, output = ssh_agent.run_cmd('{} pkill -9 dhcpd'.format(docker_cmd))
-              service_satatus = status and True
-              for cmd in self.remote_dhcpd_cmd:
-                 dhcpd_cmd = ' {0} /{1}'.format(docker_cmd,cmd)
-                 status, output = ssh_agent.run_cmd(dhcpd_cmd)
-                 service_satatus = status and True
-           return service_satatus
-
-    @classmethod
-    def dhcp_l2_relay_setup(cls):
-        device_details = OnosCtrl.get_devices(mfr = 'Nicira')
-        if device_details is not None:
-            did_ovs = device_details[0]['id']
-        else:
-           log_test.info('On this DHCPl2relay setup, onos does not have ovs device where external DHCP server is have connect point, so return with false status')
-           return False
-        cls.relay_device_id = did_ovs
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.port_map:
-            ##Per subscriber, we use 1 relay port
-            try:
-                relay_port = cls.port_map[cls.port_map['relay_ports'][0]]
-            except:
-                relay_port = cls.port_map['uplink']
-            cls.relay_interface_port = relay_port
-            cls.relay_interfaces = (cls.port_map[cls.relay_interface_port],)
-        else:
-            cls.relay_interface_port = 100
-            cls.relay_interfaces = (g_subscriber_port_map[cls.relay_interface_port],)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        if cls.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in cls.port_map['ports']:
-                port_num = cls.port_map[port]
-                if port_num == cls.port_map['uplink']:
-                    continue
-                ip = cls.get_host_ip(port_num)
-                mac = cls.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure dhcp server virtual interface on the same subnet as first client interface
-            relay_ip = cls.get_host_ip(interface_list[0][0])
-            relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
-            interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
-            cls.onos_interface_load(interface_list)
-
-    @classmethod
-    def dhcp_l2_relay_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        for config in cls.configs.items():
-            OnosCtrl.delete(config)
-        cls.onos_load_config(cls.default_onos_config)
-        # if cls.onos_restartable is True:
-        #     log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
-        #     return cord_test_onos_restart(config = {})
-
-    @classmethod
-    def onos_load_config(cls, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    @classmethod
-    def onos_delete_config(cls, config):
-        status, code = OnosCtrl.delete(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    @classmethod
-    def onos_interface_load(cls, interface_list):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(cls.relay_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        cls.onos_load_config(interface_dict)
-        cls.configs['interface_config'] = interface_dict
-
-    @classmethod
-    def cord_l2_relay_load(cls, dhcp_server_connectPoint = None, delete = False):
-        ##read the current config
-        current_netcfg = OnosCtrl.get_config()
-        connect_points = set([])
-        try:
-            connect_points = set(current_netcfg['apps']['org.opencord.dhcpl2relay']['dhcpl2relay']['dhcpServerConnectPoints'])
-        except KeyError, e:
-            pass
-
-        OnosCtrl.uninstall_app(cls.dhcpl2_app_file)
-        relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
-        #### We have to work on later versions by removing these hard coded values
-        if dhcp_server_connectPoint is None:
-            relay_device_present = filter(lambda cp: cp.split('/')[0] == cls.relay_device_id, connect_points)
-            if not relay_device_present:
-                connect_points.add(relay_device_map)
-        else:
-            cps_unused = map(lambda cp: connect_points.add(cp), dhcp_server_connectPoint)
-        connect_points = list(connect_points)
-        dhcp_dict = { "apps" : { "org.opencord.dhcpl2relay" : {"dhcpl2relay" :
-                                   {"dhcpServerConnectPoints": connect_points}
-                                                        }
-                            }
-                    }
-        #OnosCtrl.uninstall_app(cls.dhcpl2_app_file)
-        OnosCtrl.install_app(cls.dhcpl2_app_file)
-        if delete == False:
-           cls.onos_load_config(dhcp_dict)
-        else:
-           cls.onos_delete_config(dhcp_dict)
-           cls.onos_load_config(cls.default_onos_config)
-        cls.configs['relay_config'] = dhcp_dict
-
-    @classmethod
-    def cord_sadis_load(cls, sadis_info = None):
-        relay_device_id = '{}'.format(cls.relay_device_id)
-        device_details = OnosCtrl.get_devices()
-        if device_details is not None:
-           for device in device_details:
-             ## Assuming only one OVS is detected on ONOS and its for external DHCP server connect point...
-             if device['available'] is True and device['driver'] == 'voltha':
-                cls.olt_serial_id = "{}".format(device['serial'])
-             else:
-                cls.olt_serial_id = " "
-        else:
-           log_test.info('On this DHCPl2relay setup, onos does not have Tibit device where DHCP client is connected on UNI point, so return with false status')
-           return False
-        sadis_dict =  { "apps": {
-                "org.opencord.sadis": {
-                        "sadis": {
-                                "integration": {
-                                        "cache": {
-                                                "enabled": "true",
-                                                "maxsize": 50,
-                                                "ttl": "PT1m"
-                                        }
-                                },
-                                "entries": [{
-                                                "id": "uni-254",
-                                                "cTag": 202,
-                                                "sTag": 222,
-                                                "nasPortId": "uni-254"
-                                        },
-                                        {
-                                                "id": cls.olt_serial_id,
-                                                "hardwareIdentifier": "00:0c:e2:31:05:00",
-                                                "ipAddress": "172.17.0.1",
-                                                "nasId": "B100-NASID"
-                                        }
-                                ]
-                        }
-                }
-           }
-        }
-        #OnosCtrl.uninstall_app(cls.olt_app_file)
-        OnosCtrl.install_app(cls.olt_app_file)
-        time.sleep(5)
-        #OnosCtrl.uninstall_app(cls.sadis_app_file)
-        OnosCtrl.install_app(cls.sadis_app_file)
-        if sadis_info:
-           sadis_dict = sadis_info
-        cls.onos_load_config(sadis_dict)
-        cls.sadis_configs['relay_config'] = sadis_dict
-
-    def sadis_info_dict(self, subscriber_port_id =None, c_tag = None, s_tag = None, nas_port_id =None,olt_serial_id =None,olt_mac=None,olt_ip =None,olt_nas_id=None):
-        ### Need to work on these hard coded values on later merges
-        if subscriber_port_id is None:
-           subscriber_port_id = "uni-254"
-        if c_tag is None:
-           c_tag = 202
-        if s_tag is None:
-           s_tag = 222
-        if nas_port_id is None:
-           nas_port_id = "uni-254"
-        if olt_serial_id is None:
-           olt_serial_id = self.olt_serial_id
-        if olt_mac is None:
-           olt_mac = "00:0c:e2:31:05:00"
-        if olt_ip is None:
-           olt_ip = "172.17.0.1"
-        if olt_nas_id is None:
-           olt_nas_id = "B100-NASID"
-        sadis_dict =  { "apps": {
-                "org.opencord.sadis": {
-                        "sadis": {
-                                "integration": {
-                                        "cache": {
-                                                "enabled": "true",
-                                                "maxsize": 50,
-                                                "ttl": "PT1m"
-                                        }
-                                },
-                                "entries": [{
-                                                "id": subscriber_port_id,
-                                                "cTag": c_tag,
-                                                "sTag": s_tag,
-                                                "nasPortId": nas_port_id
-                                        },
-                                        {
-                                                "id": olt_serial_id,
-                                                "hardwareIdentifier": olt_mac,
-                                                "ipAddress": olt_ip,
-                                                "nasId": olt_nas_id
-                                        }
-                                ]
-                        }
-                }
-           }
-        }
-        return sadis_dict
-
-
-    @classmethod
-    def get_host_ip(cls, port):
-        if cls.host_ip_map.has_key(port):
-            return cls.host_ip_map[port]
-        cls.host_ip_map[port] = '192.168.100.{}'.format(port)
-        return cls.host_ip_map[port]
-
-    @classmethod
-    def host_load(cls, iface):
-        '''Have ONOS discover the hosts for dhcp-relay responses'''
-        port = g_subscriber_port_map[iface]
-        host = '173.17.1.{}'.format(port)
-        cmds = ( 'ifconfig {} 0'.format(iface),
-                 'ifconfig {0} {1}'.format(iface, host),
-                 'arping -I {0} {1} -c 2'.format(iface, host),
-                 'ifconfig {} 0'.format(iface), )
-        for c in cmds:
-            os.system(c)
-
-    @classmethod
-    def get_mac(cls, iface):
-        if cls.interface_to_mac_map.has_key(iface):
-            return cls.interface_to_mac_map[iface]
-        mac = get_mac(iface, pad = 0)
-        cls.interface_to_mac_map[iface] = mac
-        return mac
-
-    def dhcpl2relay_stats_calc(self, success_rate = False, only_discover = False, iface = 'veth0'):
-
-	self.ip_count = 0
-	self.failure_count = 0
-	self.start_time = 0
-	self.diff = 0
-	self.transaction_count = 0
-
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-	self.start_time = time.time()
-
-	while self.diff <= 60:
-
-	    if only_discover:
-		cip, sip, mac, _ = self.dhcp.only_discover(multiple = True)
-                log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                        (cip, sip, mac))
-	    else:
-	        cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
-
-	    if cip:
-                self.ip_count +=1
-	    elif cip == None:
-		self.failure_count += 1
-                log_test.info('Failed to get ip')
-		if success_rate and self.ip_count > 0:
-			break
-
-	    self.diff = round(time.time() - self.start_time, 0)
-
-	self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-        self.transactions += (self.ip_count+self.failure_count)
-	self.running_time += self.diff
-        self.total_success += self.ip_count
-	self.total_failure += self.failure_count
-
-    def send_recv(self, mac=None, update_seed = False, validate = True):
-        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
-        if validate:
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                (cip, sip, self.dhcp.get_mac(cip)[0]))
-        return cip,sip
-
-    def cliEnter(self, controller = None):
-        retries = 0
-        while retries < 30:
-            self.cli = OnosCliDriver(controller = controller, connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-
-    def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
-        tries = 0
-        try:
-            self.cliEnter(controller = controller)
-            while tries <= 10:
-                cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
-                if cluster_summary:
-                    log_test.info("cluster 'summary' command output is %s"%cluster_summary)
-                    nodes = cluster_summary['nodes']
-                    if verify:
-                        if nodes == onos_instances:
-                            self.cliExit()
-                            return True
-                        else:
-                            tries += 1
-                            time.sleep(1)
-                    else:
-                        if nodes >= onos_instances:
-                            self.cliExit()
-                            return True
-                        else:
-                            tries += 1
-                            time.sleep(1)
-                else:
-                    tries += 1
-                    time.sleep(1)
-            self.cliExit()
-            return False
-        except:
-            raise Exception('Failed to get cluster members')
-            return False
-
-
-    def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
-        tries = 0
-        cluster_ips = []
-        try:
-            self.cliEnter(controller = controller)
-            while tries <= 10:
-                cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
-                if cluster_nodes:
-                    log_test.info("cluster 'nodes' output is %s"%cluster_nodes)
-                    if nodes_filter:
-                        cluster_nodes = nodes_filter(cluster_nodes)
-                    cluster_ips = map(lambda c: c['id'], cluster_nodes)
-                    self.cliExit()
-                    cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
-                    return cluster_ips
-                else:
-                    tries += 1
-            self.cliExit()
-            return cluster_ips
-        except:
-            raise Exception('Failed to get cluster members')
-            return cluster_ips
-
-    def get_cluster_container_names_ips(self,controller=None):
-        onos_names_ips = {}
-        controllers = get_controllers()
-        i = 0
-        for controller in controllers:
-            if i == 0:
-                name = Onos.NAME
-            else:
-                name = '{}-{}'.format(Onos.NAME, i+1)
-            onos_names_ips[controller] = name
-            onos_names_ips[name] = controller
-            i += 1
-        return onos_names_ips
-
-    def get_cluster_current_master_standbys(self,controller=None,device_id=relay_device_id):
-        master = None
-        standbys = []
-        tries = 0
-        try:
-            cli = self.cliEnter(controller = controller)
-            while tries <= 10:
-                roles = json.loads(self.cli.roles(jsonFormat = True))
-                log_test.info("cluster 'roles' command output is %s"%roles)
-                if roles:
-                    for device in roles:
-                        log_test.info('Verifying device info in line %s'%device)
-                        if device['id'] == device_id:
-                            master = str(device['master'])
-                            standbys = map(lambda d: str(d), device['standbys'])
-                            log_test.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
-                            self.cliExit()
-                            return master, standbys
-                            break
-                    self.cliExit()
-                    return master, standbys
-                else:
-                    tries += 1
-                    time.sleep(1)
-            self.cliExit()
-            return master,standbys
-        except:
-            raise Exception('Failed to get cluster members')
-            return master,standbys
-
-    def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
-        ''' returns master and standbys of all the connected devices to ONOS cluster instance'''
-        device_dict = {}
-        tries = 0
-        try:
-            cli = self.cliEnter(controller = controller)
-            while tries <= 10:
-                device_dict = {}
-                roles = json.loads(self.cli.roles(jsonFormat = True))
-                log_test.info("cluster 'roles' command output is %s"%roles)
-                if roles:
-                    for device in roles:
-                        device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
-                        for i in range(len(device_dict[device['id']]['standbys'])):
-                            device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
-                        log_test.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
-                    self.cliExit()
-                    return device_dict
-                else:
-                    tries += 1
-                    time.sleep(1)
-            self.cliExit()
-            return device_dict
-        except:
-            raise Exception('Failed to get cluster members')
-            return device_dict
-
-    def get_number_of_devices_of_master(self,controller=None):
-        '''returns master-device pairs, which master having what devices'''
-        master_count = {}
-        try:
-            cli = self.cliEnter(controller = controller)
-            masters = json.loads(self.cli.masters(jsonFormat = True))
-            if masters:
-                for master in masters:
-                    master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
-                return master_count
-            else:
-                return master_count
-        except:
-            raise Exception('Failed to get cluster members')
-            return master_count
-
-    def change_master_current_cluster(self,new_master=None,device_id=relay_device_id,controller=None):
-        if new_master is None: return False
-        self.cliEnter(controller=controller)
-        cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
-        command = self.cli.command(cmd = cmd, jsonFormat = False)
-        self.cliExit()
-        time.sleep(60)
-        master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
-        assert_equal(master,new_master)
-        log_test.info('Cluster master changed to %s successfully'%new_master)
-
-    def withdraw_cluster_current_mastership(self,master_ip=None,device_id=relay_device_id,controller=None):
-        '''current master looses its mastership and hence new master will be elected'''
-        self.cliEnter(controller=controller)
-        cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
-        command = self.cli.command(cmd = cmd, jsonFormat = False)
-        self.cliExit()
-        time.sleep(60)
-        new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
-        assert_not_equal(new_master_ip,master_ip)
-        log_test.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
-        log_test.info('Cluster new master is %s'%new_master_ip)
-        return True
-    def cluster_controller_restarts(self, graceful = False):
-        controllers = get_controllers()
-        ctlr_len = len(controllers)
-        if ctlr_len <= 1:
-            log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
-            assert_greater(ctlr_len, 1)
-
-        #this call would verify the cluster for once
-        onos_map = self.get_cluster_container_names_ips()
-
-        def check_exception(iteration, controller = None):
-            adjacent_controller = None
-            adjacent_controllers = None
-            if controller:
-                adjacent_controllers = list(set(controllers) - set([controller]))
-                adjacent_controller = adjacent_controllers[0]
-            for node in controllers:
-                onosLog = OnosLog(host = node)
-                ##check the logs for storage exception
-                _, output = onosLog.get_log(('ERROR', 'Exception',))
-                if output and output.find('StorageException$Timeout') >= 0:
-                    log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
-                    log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    log_test.info('%s' %output)
-                    log_test.info('\n' + '-' * 50 + '\n')
-                    failed = self.verify_leaders(controllers)
-                    if failed:
-                        log_test.info('Leaders command failed on nodes: %s' %failed)
-                        log_test.error('Test failed on ITERATION %d' %iteration)
-                        CordLogger.archive_results(self._testMethodName,
-                                                   controllers = controllers,
-                                                   iteration = 'FAILED',
-                                                   archive_partition = self.ARCHIVE_PARTITION)
-                        assert_equal(len(failed), 0)
-                    return controller
-
-            try:
-                ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
-                log_test.info('ONOS cluster formed with controllers: %s' %ips)
-                st = True
-            except:
-                st = False
-
-            failed = self.verify_leaders(controllers)
-            if failed:
-                log_test.error('Test failed on ITERATION %d' %iteration)
-                CordLogger.archive_results(self._testMethodName,
-                                           controllers = controllers,
-                                           iteration = 'FAILED',
-                                           archive_partition = self.ARCHIVE_PARTITION)
-            assert_equal(len(failed), 0)
-            if st is False:
-                log_test.info('No storage exception and ONOS cluster was not formed successfully')
-            else:
-                controller = None
-
-            return controller
-
-        next_controller = None
-        tries = self.ITERATIONS
-        for num in range(tries):
-            index = num % ctlr_len
-            #index = random.randrange(0, ctlr_len)
-            controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
-            controller = onos_map[controller_name]
-            log_test.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
-            try:
-                #enable debug log for the other controllers before restarting this controller
-                adjacent_controllers = list( set(controllers) - set([controller]) )
-                self.log_set(controllers = adjacent_controllers)
-                self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
-                if graceful is True:
-                    log_test.info('Gracefully shutting down controller: %s' %controller)
-                    self.onos_shutdown(controller)
-                cord_test_onos_restart(node = controller, timeout = 0)
-                self.log_set(controllers = controller)
-                self.log_set(app = 'io.atomix', controllers = controller)
-                time.sleep(60)
-            except:
-                time.sleep(5)
-                continue
-
-            #first archive the test case logs for this run
-            CordLogger.archive_results(self._testMethodName,
-                                       controllers = controllers,
-                                       iteration = 'iteration_{}'.format(num+1),
-                                       archive_partition = self.ARCHIVE_PARTITION)
-            next_controller = check_exception(num, controller = controller)
-
-    def onos_shutdown(self, controller = None):
-        status = True
-        self.cliEnter(controller = controller)
-        try:
-            self.cli.shutdown(timeout = 10)
-        except:
-            log_test.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
-            status = False
-
-        self.cliExit()
-        return status
-
-    def test_dhcpl2relay_initialize(self):
-        '''Configure the DHCP L2 relay app and start dhcpd'''
-        self.dhcpd_start()
-
-    def test_dhcpl2relay_with_one_request(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        self.send_recv(mac=mac)
-
-    def test_dhcpl2relay_app_install(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.dhcpl2relay'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not being installed'%app_name)
-           assert_equal(True, app_status)
-
-    def test_dhcpl2relay_sadis_app_install(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not being installed'%app_name)
-           assert_equal(True, app_status)
-
-    def test_dhcpl2relay_netcfg(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.dhcpl2relay'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               else:
-                  log_test.info('The network configuration is shown = %s'%onos_netcfg['apps'][app_name])
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown'%app_name)
-           assert_equal(True, False)
-
-    def test_dhcpl2relay_sadis_netcfg(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               else:
-                  log_test.info('The network configuration is shown = %s'%(onos_netcfg['apps'][app_name]))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown'%app_name)
-           assert_equal(True, False)
-
-    def test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server(self, iface = 'veth0'):
-        connect_point = self.default_onos_netcfg['apps']['org.opencord.dhcpl2relay']['dhcpl2relay']['dhcpServerConnectPoints']
-        log_test.info('Existing connect point of dhcp server is %s'%connect_point)
-        relay_device_map1 = '{}/{}'.format(self.relay_device_id, random.randrange(1,5, 1))
-        relay_device_map2 = '{}/{}'.format(self.relay_device_id, random.randrange(6,10, 1))
-        relay_device_map3 = '{}/{}'.format(self.relay_device_id, random.randrange(10,16, 1))
-        relay_device_map4 = '{}/{}'.format(self.relay_device_id, random.randrange(17,23, 1))
-        dhcp_server_array_connectPoints = [connect_point[0],relay_device_map1,relay_device_map2,relay_device_map3,relay_device_map4]
-        log_test.info('Added array of connect points of dhcp server is %s'%dhcp_server_array_connectPoints)
-
-        mac = self.get_mac(iface)
-        self.onos_load_config(self.default_onos_netcfg)
-        dhcp_dict = { "apps" : { "org.opencord.dhcpl2relay" : {"dhcpl2relay" :
-                                   {"dhcpServerConnectPoints": dhcp_server_array_connectPoints}
-                                                        }
-                            }
-                    }
-        self.onos_load_config(dhcp_dict)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.dhcpl2relay'
-        for app in onos_netcfg['apps']:
-            if app == app_name and onos_netcfg['apps'][app] != {}:
-               log_test.info('%s app is being installed'%app)
-               log_test.info('The network configuration is shown %s'%onos_netcfg['apps'][app])
-               x = set(onos_netcfg['apps'][app_name]['dhcpl2relay']['dhcpServerConnectPoints']) & set(dhcp_server_array_connectPoints)
-               if len(x) == len(dhcp_server_array_connectPoints):
-                  log_test.info('The loaded onos network configuration is = %s'%dhcp_server_array_connectPoints)
-                  app_status = True
-               break
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown'%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        self.send_recv(mac=mac)
-
-
-    def test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        c_tag = 600
-        invalid_sadis_info = self.sadis_info_dict(c_tag = 600,s_tag = 500)
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag'] == c_tag:
-                  log_test.info('The S Tag and C Tag info from network configuration are %s and %s respectively '%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag'],onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_app_activation_and_deactivation_multiple_times(self, iface = 'veth0'):
-        iterations = 15
-        for i in range(iterations):
-            self.onos_ctrl.deactivate()
-            time.sleep(3)
-            self.onos_ctrl.activate()
-        log_test.info('Dhcpl2relay app is activated and deactivated multiple times around %s, now sending DHCP discover'%iterations)
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        self.send_recv(mac=mac)
-
-    def test_dhcpl2relay_without_sadis_app(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        OnosCtrl.uninstall_app(self.sadis_app_file)
-        OnosCtrl(self.sadis_app).deactivate()
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_delete_and_add_sadis_app(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        log_test.info('Uninstall the sadis app from onos ,app version = %s '%self.sadis_app_file)
-        OnosCtrl.uninstall_app(self.sadis_app_file)
-        OnosCtrl(self.sadis_app).deactivate()
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-        log_test.info('Installing the sadis app in onos again, app version = %s '%self.sadis_app_file)
-        OnosCtrl.install_app(self.sadis_app_file)
-        OnosCtrl(self.sadis_app).activate()
-        OnosCtrl(self.app).activate()
-        #self.onos_load_config(self.sadis_configs['relay_config'])
-        self.send_recv(mac=mac)
-
-    def test_dhcpl2relay_with_option_82(self, iface = 'veth0'):
-        pass
-
-    def test_dhcpl2relay_without_option_82(self, iface = 'veth0'):
-        pass
-
-    def test_dhcl2relay_for_option82_without_configuring_dhcpserver_to_accept_option82(self, iface = 'veth0'):
-        pass
-
-    def test_dhcpl2relay_with_different_uni_port_entry_sadis_config(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        subscriber_port_id = "uni-200"
-        invalid_sadis_info = self.sadis_info_dict(subscriber_port_id = "uni-200")
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['id'] == subscriber_port_id:
-                  log_test.info('The network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['id']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_with_different_ctag_options(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        c_tag = 600
-        invalid_sadis_info = self.sadis_info_dict(c_tag = 600)
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag'] == c_tag:
-                  log_test.info('The C Tag info from network configuration is = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_with_different_stag_options(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        s_tag = 600
-        invalid_sadis_info = self.sadis_info_dict(s_tag = 600)
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag'] == s_tag:
-                  log_test.info('The S Tag info from the network configuration is = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_without_nasportid_option_in_sadis(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        invalid_sadis_info = self.sadis_info_dict(nas_port_id = " ")
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId'] == " ":
-                  log_test.info('The nasPortId info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_with_nasportid_different_from_id(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        nas_port_id = "uni-509"
-        invalid_sadis_info = self.sadis_info_dict(nas_port_id = "uni-509")
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId'] == nas_port_id:
-                  log_test.info('The nasPortId info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_without_serial_id_of_olt(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        invalid_sadis_info = self.sadis_info_dict(olt_serial_id = " ")
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id'] == " ":
-                  log_test.info('The serial Id info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id']))
-                  app_status = True
-        if app_status is not True:
-           log_test.info('%s app is not installed or network configuration is not shown '%app_name)
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_with_wrong_serial_id_of_olt(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        olt_serial_id = "07f20d06696041febf974ccdhdhhjh37"
-        invalid_sadis_info = self.sadis_info_dict(olt_serial_id = "07f20d06696041febf974ccdhdhhjh37")
-        self.cord_sadis_load(sadis_info = invalid_sadis_info)
-        onos_netcfg = OnosCtrl.get_config()
-        app_status = False
-        app_name = 'org.opencord.sadis'
-        for app in onos_netcfg['apps']:
-            if app == app_name:
-               log_test.info('%s app is being installed'%app)
-               if onos_netcfg['apps'][app_name] == {}:
-                  log_test.info('The network configuration is not shown'%onos_netcfg['apps'][app_name])
-               elif onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id'] == olt_serial_id:
-                  log_test.info('The serial Id info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id']))
-                  app_status = True
-        if app_status is not True:
-           assert_equal(True, False)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        assert_equal(cip,None)
-
-    def test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
-        assert_equal(cip,None)
-	log_test.info('Dhcp server rejected client discover with invalid source mac, as expected')
-
-    def test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:01:98:05')
-        assert_equal(cip,None)
-	log_test.info('Dhcp server rejected client discover with invalid source mac, as expected')
-
-    def test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
-        assert_equal(cip,None)
-        log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-
-        ### We can't test this on single uni port setup, hence its not to test
-    @nottest
-    def test_dhcpl2relay_with_N_requests(self, iface = 'veth0',requests=10):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        ip_map = {}
-        for i in range(requests):
-            #mac = RandMAC()._fix()
-	    #log_test.info('mac is %s'%mac)
-            cip, sip = self.send_recv(mac=mac, update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-	    time.sleep(1)
-
-    def test_dhcpl2relay_with_one_release(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcp.release(cip), True)
-        log_test.info('Triggering DHCP discover again after release')
-        cip2, sip2 = self.send_recv(mac=mac)
-        log_test.info('Verifying released IP was given back on rediscover')
-        assert_equal(cip, cip2)
-        log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-        assert_equal(self.dhcp.release(cip2), True)
-
-    @nottest
-    def test_dhcpl2relay_with_Nreleases(self, iface = 'veth0'):
-        mac = None
-        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
-        ip_map = {}
-        for i in range(10):
-            cip, sip = self.send_recv(mac=mac, update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-
-        for ip in ip_map.keys():
-            log_test.info('Releasing IP %s' %ip)
-            assert_equal(self.dhcp.release(ip), True)
-
-        ip_map2 = {}
-        log_test.info('Triggering DHCP discover again after release')
-        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
-        for i in range(len(ip_map.keys())):
-            cip, sip = self.send_recv(mac=mac, update_seed = True)
-            ip_map2[cip] = sip
-
-        log_test.info('Verifying released IPs were given back on rediscover')
-        if ip_map != ip_map2:
-            log_test.info('Map before release %s' %ip_map)
-            log_test.info('Map after release %s' %ip_map2)
-        assert_equal(ip_map, ip_map2)
-
-    @nottest
-    def test_dhcpl2relay_starvation(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-        log_test.info('Verifying 1 ')
-	count = 0
-        while True:
-            #mac = RandMAC()._fix()
-            cip, sip = self.send_recv(mac=mac,update_seed = True,validate = False)
-	    if cip is None:
-		break
-	    else:
-		count += 1
-	assert_equal(count,91)
-        log_test.info('Verifying 2 ')
-        cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
-        assert_equal(cip, None)
-        assert_equal(sip, None)
-
-    def test_dhcpl2relay_with_same_client_and_multiple_discovers(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	log_test.info('Triggering DHCP discover again.')
-	new_cip, new_sip, new_mac, _ = self.dhcp.only_discover(mac=mac)
-	assert_equal(new_cip, cip)
-	log_test.info('got same ip to smae the client when sent discover again, as expected')
-
-    def test_dhcpl2relay_with_same_client_and_multiple_requests(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	log_test.info('Sending DHCP discover and DHCP request.')
-	cip, sip = self.send_recv(mac=mac)
-	mac = self.dhcp.get_mac(cip)[0]
-	log_test.info("Sending DHCP request again.")
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info('got same ip to smae the client when sent request again, as expected')
-
-    def test_dhcpl2relay_with_clients_desired_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '192.168.1.31', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac,desired = True)
-	assert_equal(cip,self.dhcp.seed_ip)
-	log_test.info('Got dhcp client desired IP %s from server %s for mac %s as expected' %
-		  (cip, sip, mac) )
-
-    def test_dhcpl2relay_with_clients_desired_address_out_of_pool(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac,desired = True)
-	assert_not_equal(cip,None)
-	assert_not_equal(cip,self.dhcp.seed_ip)
-	log_test.info('server offered IP from its pool when requested out of pool IP, as expected')
-
-    def test_dhcpl2relay_nak_packet(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-	assert_equal(new_cip, None)
-	log_test.info('server sent NAK packet when requested other IP than that server offered')
-
-    def test_dhcpl2relay_client_requests_with_specific_lease_time_in_discover_message(self, iface = 'veth0',lease_time=700):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.70', iface = iface)
-	self.dhcp.return_option = 'lease'
-	cip, sip, mac, lval = self.dhcp.only_discover(mac=mac,lease_time=True,lease_value=lease_time)
-	assert_equal(lval, lease_time)
-	log_test.info('dhcp server offered IP address with client requested lease time')
-
-    def test_dhcpl2relay_with_client_request_after_reboot(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	log_test.info('client rebooting...')
-	os.system('ifconfig '+iface+' down')
-	time.sleep(5)
-	os.system('ifconfig '+iface+' up')
-	new_cip2, new_sip = self.dhcp.only_request(cip, mac, cl_reboot = True)
-	assert_equal(new_cip2, cip)
-	log_test.info('client got same IP after reboot, as expected')
-
-    def test_dhcpl2relay_after_server_shutting_down(self, iface = 'veth0'):
-        self.get_dhcpd_process()
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	log_test.info('server rebooting...')
-        try:
-         if self.dhcpd_stop(remote_controller = True, dhcpd = 'stop'):
-           time.sleep(5)
-	   log_test.info('DHCP server is stopped ')
-	   new_cip, new_sip = self.dhcp.only_request(cip, mac)
-           assert_equal(new_cip,None)
-         else:
-	   log_test.info('DHCP server is not stopped' )
-           assert_equal(new_cip,None)
-        finally:
-          self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
-
-    def test_dhcpl2relay_after_server_reboot(self, iface = 'veth0'):
-        self.get_dhcpd_process()
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-        assert_not_equal(cip, None)
-        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-        log_test.info('server rebooting...')
-        try:
-         if self.dhcpd_stop(remote_controller = True, dhcpd = 'restart'):
-           time.sleep(5)
-           log_test.info('DHCP server is rebooted')
-           new_cip, new_sip = self.dhcp.only_request(cip, mac)
-           assert_equal(new_cip,cip)
-         else:
-           log_test.info('DHCP server is not stopped' )
-           assert_equal(new_cip,None)
-        finally:
-          self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
-
-    def test_dhcpl2relay_after_server_stop_start(self, iface = 'veth0'):
-        self.get_dhcpd_process()
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-        assert_not_equal(cip, None)
-        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-        log_test.info('server rebooting...')
-        try:
-         if self.dhcpd_stop(remote_controller = True, dhcpd = 'stop'):
-           time.sleep(5)
-           log_test.info('DHCP server is stopped ')
-           new_cip, new_sip = self.dhcp.only_request(cip, mac)
-           assert_equal(new_cip,None)
-         else:
-           log_test.info('DHCP server is not stoppped' )
-           assert_equal(new_cip,None)
-         self.dhcpd_stop(remote_controller = True, dhcpd = 'start')
-         log_test.info('DHCP server is started ')
-         new_cip, new_sip = self.dhcp.only_request(cip, mac)
-         assert_equal(new_cip, cip)
-         log_test.info('client got same IP after server rebooted, as expected')
-        finally:
-          self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
-
-    def test_dhcpl2relay_with_specific_lease_time_in_discover_and_without_in_request_packet(self, iface = 'veth0',lease_time=700):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	self.dhcp.return_option = 'lease'
-	log_test.info('Sending DHCP discover with lease time of 700')
-	cip, sip, mac, lval = self.dhcp.only_discover(mac=mac,lease_time = True, lease_value=lease_time)
-	assert_equal(lval,lease_time)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True)
-	assert_equal(new_cip,cip)
-	assert_not_equal(lval, lease_time) #Negative Test Case
-	log_test.info('client requested lease time in discover packer is not seen in server ACK packet as expected')
-
-    def test_dhcpl2relay_with_specific_lease_time_in_request_and_without_in_discover_packet(self, iface = 'veth0',lease_time=800):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True,lease_value=lease_time)
-	assert_equal(new_cip,cip)
-	assert_equal(lval, lease_time)
-	log_test.info('client requested lease time in request packet seen in servre replied ACK packet as expected')
-
-    @nottest
-    def test_dhcpl2relay_with_client_renew_time(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-	log_test.info('waiting for  renew  time.. a= %s b= %s c= %s'%(new_cip,new_sip,lval))
-	time.sleep(lval)
-	latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-	assert_equal(latest_cip, cip)
-	log_test.info('server renewed client IP when client sends request after renew time, as expected')
-
-    @nottest
-    def test_dhcpl2relay_with_client_rebind_time(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-	log_test.info('waiting for  rebind  time..')
-	time.sleep(lval)
-	latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-	assert_equal(latest_cip, cip)
-        log_test.info('server renewed client IP when client sends request after rebind time, as expected')
-
-    def test_dhcpl2relay_with_client_expected_subnet_mask(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_subnet = '255.255.255.0'
-	self.dhcp.return_option = 'subnet'
-
-	cip, sip, mac, subnet_mask = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(subnet_mask,expected_subnet)
-	log_test.info('subnet mask in server offer packet is same as configured subnet mask in dhcp server')
-
-    def test_dhcpl2relay_with_client_sending_dhcp_request_with_wrong_subnet_mask(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'subnet'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Subnet Mask in DHCP Request.")
-
-    @nottest
-    def test_dhcpl2relay_with_client_expected_router_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_router_address = '20.20.20.1'
-	self.dhcp.return_option = 'router'
-
-	cip, sip, mac, router_address_value = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_router_address, router_address_value)
-	log_test.info('router address in server offer packet is same as configured router address in dhcp server')
-
-    @nottest
-    def test_dhcpl2relay_with_client_sends_dhcp_request_with_wrong_router_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'router'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Router Address in DHCP Request.")
-
-    def test_dhcpl2relay_with_client_expecting_broadcast_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_broadcast_address = '192.168.1.255'
-	self.dhcp.return_option = 'broadcast_address'
-
-	cip, sip, mac, broadcast_address_value = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_broadcast_address, broadcast_address_value)
-	log_test.info('broadcast address in server offer packet is same as configured broadcast address in dhcp server')
-
-    def test_dhcpl2relay_by_client_sending_dhcp_request_with_wrong_broadcast_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'broadcast_address'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Broadcast Address in DHCP Request.")
-
-    def test_dhcpl2relay_with_client_expecting_dns_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_dns_address = '192.168.1.1'
-	self.dhcp.return_option = 'dns'
-
-	cip, sip, mac, dns_address_value = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_dns_address, dns_address_value)
-	log_test.info('dns address in server offer packet is same as configured dns address in dhcp server')
-
-    def test_dhcpl2relay_by_client_sending_request_with_wrong_dns_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'dns'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong DNS Address in DHCP Request.")
-
-
-    def test_dhcpl2relay_transactions_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.dhcpl2relay_stats_calc()
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
-
-	log_test.info("Final Statistics for total transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
-
-    def test_dhcpl2relay_consecutive_successes_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.dhcpl2relay_stats_calc(success_rate = True)
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpl2relay_with_max_clients_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.dhcpl2relay_stats_calc(only_discover = True)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d of sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of clients per second in run %d:%f                                      "
-		    %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of clients per second: %d                                        ",
-		round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpl2relay_consecutive_successful_clients_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.dhcpl2relay_stats_calc(success_rate = True, only_discover = True)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d for sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful clients per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful clients per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpl2relay_concurrent_transactions_per_second(self, iface = 'veth0'):
-	for key in (key for key in g_subscriber_port_map if key < 100):
-	    self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i):
-	    mac = self.get_mac('veth{}'.format(i))
-	    cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
-	    log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-	    self.lock.acquire()
-
-	    if cip:
-		    self.ip_count += 1
-
-	    elif cip is None:
-		    self.failure_count += 1
-
-	    self.lock.notify_all()
-	    self.lock.release()
-
-	for i in range (1,4):
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while self.diff <= 60:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-
-	    self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d"
-			    %(self.ip_count+self.failure_count,self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Final Statistics for total transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    @nottest
-    def test_dhcpl2relay_concurrent_consecutive_successes_per_second(self, iface = 'veth0'):
-	failure_dir = {}
-
-	for key in (key for key in g_subscriber_port_map if key != 100):
-	    self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i, j):
-#		log_test.info("Thread Name:%s",current_thread().name)
-#		failure_dir[current_thread().name] = True
-	    while failure_dir.has_key(current_thread().name) is False:
-		  mac = RandMAC()._fix()
-		  cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
-		  i += 2
-		  log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-		  self.lock.acquire()
-
-		  if cip:
-		     self.ip_count += 1
-		     self.lock.notify_all()
-		     self.lock.release()
-		  elif cip is None:
-		     self.failure_count += 1
-		     failure_dir[current_thread().name] = True
-		     self.lock.notify_all()
-		     self.lock.release()
-		     break
-#		self.lock.notify_all()
-#		self.lock.release()
-
-	for i in range (1,4):
-	    failure_dir = {}
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while len(failure_dir) != 6:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-	    self.transaction_count = round((self.ip_count)/self.diff, 2)
-
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,2))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    @nottest
-    def test_dhcpl2relay_for_concurrent_clients_per_second(self, iface = 'veth0'):
-	for key in (key for key in g_subscriber_port_map if key < 100):
-		self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i):
-#		mac = self.get_mac('veth{}'.format(i))
-	    cip, sip, mac, _ = DHCPTest(iface = 'veth{}'.format(i)).only_discover(mac = RandMAC()._fix())
-	    log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-	    self.lock.acquire()
-
-	    if cip:
-	       self.ip_count += 1
-	    elif cip is None:
-	       self.failure_count += 1
-
-	    self.lock.notify_all()
-	    self.lock.release()
-
-	for i in range (1,4):
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while self.diff <= 60:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-	    self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d of sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of clients per second in run %d:%f                                      "
-		    %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of clients per second: %d                                        ",
-		round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    @nottest
-    def test_dhcpl2relay_with_client_conflict(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
-		  (cip, sip, mac) )
-        self.dhcp1 = DHCPTest(seed_ip = cip, iface = iface)
-	new_cip, new_sip, new_mac, _ = self.dhcp1.only_discover(desired = True)
-	new_cip, new_sip = self.dhcp1.only_request(new_cip, new_mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
-		  (new_cip, new_sip, new_mac) )
-	log_test.info("IP %s alredy consumed by mac %s." % (new_cip, new_mac))
-	log_test.info("Now sending DHCP Request for old DHCP discover.")
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	if new_cip is None:
-	   log_test.info('Got dhcp client IP %s from server %s for mac %s.Which is expected behavior.'
-                    %(new_cip, new_sip, new_mac) )
-	elif new_cip:
-	   log_test.info('Got dhcp client IP %s from server %s for mac %s.Which is not expected behavior as IP %s is already consumed.'
-		    %(new_cip, new_sip, new_mac, new_cip) )
-	   assert_equal(new_cip, None)
-
-    ##### All cluster scenarios on dhcpl2relay has to validate on voltha-setup from client server.
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.change_master_current_cluster(device_id = self.relay_device_id,new_master=standbys[0])
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_withdraw_membership(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_restart_cluster(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
-        self.cord_test_onos_restart()
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
-        cord_test_onos_shutdown(node = master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_standby_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        cord_test_onos_shutdown(node = standbys[0])
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_adding_two_members_to_cluster(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        cord_test_onos_shutdown(node = standbys[0])
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_releasing_dhcp_ip_after_restart_cluster_for_10_times(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
-        for i in range(10):
-            self.cord_test_onos_restart()
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_on_cluster_with_master_controller_only_restarts(self, iface = 'veth0'):
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
-        self.cord_test_onos_restart(node = master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_on_cluster_with_standby_controller_only_restarts(self, iface = 'veth0'):
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
-        self.cord_test_onos_restart(node = standbys[0])
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_by_removing_master_onos_instance(self, iface = 'veth0'):
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_by_removing_onos_instance_member(self, iface = 'veth0'):
-
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=standbys[0])
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-    @nottest
-    def test_dhcpl2relay_by_toggle_master_onos_instance_membership(self, iface = 'veth0'):
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
-        self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_by_toggle_standby_onos_instance_membership(self, iface = 'veth0'):
-        pass
-        status = self.verify_cluster_status(onos_instances=onos_instances)
-        assert_equal(status, True)
-        master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
-        assert_equal(len(standbys),(onos_instances-1))
-        mac = self.get_mac(iface)
-        self.cord_l2_relay_load
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
-        self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
-        self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-        self.cord_l2_relay_load
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcprelay.dhcp.release(cip), True)
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           self.cord_l2_relay_load
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
-
-
-    @nottest
-    def test_dhcpl2relay_by_adding_onos_instance_member(self, iface = 'veth0'):
-        pass
-
-
-
diff --git a/src/test/dhcprelay/__init__.py b/src/test/dhcprelay/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/dhcprelay/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/dhcprelay/dhcprelayTest.py b/src/test/dhcprelay/dhcprelayTest.py
deleted file mode 100644
index db4dd3c..0000000
--- a/src/test/dhcprelay/dhcprelayTest.py
+++ /dev/null
@@ -1,1404 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF AeY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-import time
-import os, sys
-from DHCP import DHCPTest
-from CordTestUtils import get_mac, log_test
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from CordTestServer import cord_test_onos_restart
-from CordLogger import CordLogger
-from portmaps import g_subscriber_port_map
-import threading, random
-from threading import current_thread
-log_test.setLevel('INFO')
-
-class dhcprelay_exchange(CordLogger):
-
-    app = 'org.onosproject.dhcprelay'
-    app_dhcp = 'org.onosproject.dhcp'
-    relay_interfaces_last = ()
-    interface_to_mac_map = {}
-    host_ip_map = {}
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    dhcp_data_dir = os.path.join(test_path, '..', 'setup')
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
-    default_options = [ ('subnet-mask', '255.255.255.0'),
-                     ('broadcast-address', '192.168.1.255'),
-                     ('domain-name-servers', '192.168.1.1'),
-                     ('domain-name', '"mydomain.cord-tester"'),
-                   ]
-    ##specify the IP for the dhcp interface matching the subnet and subnet config
-    ##this is done for each interface dhcpd server would be listening on
-    default_subnet_config = [ ('192.168.1.2',
-'''
-subnet 192.168.1.0 netmask 255.255.255.0 {
-    range 192.168.1.10 192.168.1.100;
-}
-'''), ]
-
-    lock = threading.Condition()
-    ip_count = 0
-    failure_count = 0
-    start_time = 0
-    diff = 0
-
-    transaction_count = 0
-    transactions = 0
-    running_time = 0
-    total_success = 0
-    total_failure = 0
-    #just in case we want to reset ONOS to default network cfg after relay tests
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-    configs = {}
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the dhcprelay app'''
-        OnosCtrl(cls.app_dhcp).deactivate()
-        time.sleep(3)
-        cls.onos_ctrl = OnosCtrl(cls.app)
-        status, _ = cls.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(3)
-        cls.dhcp_relay_setup()
-        ##start dhcpd initially with default config
-        cls.dhcpd_start()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the dhcp relay app'''
-        try:
-            os.unlink('{}/dhcpd.conf'.format(cls.dhcp_data_dir))
-            os.unlink('{}/dhcpd.leases'.format(cls.dhcp_data_dir))
-        except: pass
-        cls.onos_ctrl.deactivate()
-        cls.dhcpd_stop()
-        cls.dhcp_relay_cleanup()
-
-    @classmethod
-    def dhcp_relay_setup(cls):
-        did = OnosCtrl.get_device_id()
-        cls.relay_device_id = did
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.port_map:
-            ##Per subscriber, we use 1 relay port
-            try:
-                relay_port = cls.port_map[cls.port_map['relay_ports'][0]]
-            except:
-                relay_port = cls.port_map['uplink']
-            cls.relay_interface_port = relay_port
-            cls.relay_interfaces = (cls.port_map[cls.relay_interface_port],)
-        else:
-            cls.relay_interface_port = 100
-            cls.relay_interfaces = (g_subscriber_port_map[cls.relay_interface_port],)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        if cls.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in cls.port_map['ports']:
-                port_num = cls.port_map[port]
-                if port_num == cls.port_map['uplink']:
-                    continue
-                ip = cls.get_host_ip(port_num)
-                mac = cls.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure dhcp server virtual interface on the same subnet as first client interface
-            relay_ip = cls.get_host_ip(interface_list[0][0])
-            relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
-            interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
-            cls.onos_interface_load(interface_list)
-
-    @classmethod
-    def dhcp_relay_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        for config in cls.configs.items():
-            OnosCtrl.delete(config)
-        # if cls.onos_restartable is True:
-        #     log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
-        #     return cord_test_onos_restart(config = {})
-
-    @classmethod
-    def onos_load_config(cls, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    @classmethod
-    def onos_interface_load(cls, interface_list):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(cls.relay_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        cls.onos_load_config(interface_dict)
-        cls.configs['interface_config'] = interface_dict
-
-    @classmethod
-    def onos_dhcp_relay_load(cls, server_ip, server_mac):
-        relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
-        dhcp_dict = {'apps':{'org.onosproject.dhcp-relay':{'dhcprelay':
-                                                          {'dhcpserverConnectPoint':relay_device_map,
-                                                           'serverip':server_ip,
-                                                           'servermac':server_mac
-                                                           }
-                                                           }
-                             }
-                     }
-        cls.onos_load_config(dhcp_dict)
-        cls.configs['relay_config'] = dhcp_dict
-
-    @classmethod
-    def get_host_ip(cls, port):
-        if cls.host_ip_map.has_key(port):
-            return cls.host_ip_map[port]
-        cls.host_ip_map[port] = '192.168.1.{}'.format(port)
-        return cls.host_ip_map[port]
-
-    @classmethod
-    def host_load(cls, iface):
-        '''Have ONOS discover the hosts for dhcp-relay responses'''
-        port = g_subscriber_port_map[iface]
-        host = '173.17.1.{}'.format(port)
-        cmds = ( 'ifconfig {} 0'.format(iface),
-                 'ifconfig {0} {1}'.format(iface, host),
-                 'arping -I {0} {1} -c 2'.format(iface, host),
-                 'ifconfig {} 0'.format(iface), )
-        for c in cmds:
-            os.system(c)
-
-    @classmethod
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    @classmethod
-    def dhcpd_start(cls, intf_list = None,
-                    config = default_config, options = default_options,
-                    subnet = default_subnet_config):
-        '''Start the dhcpd server by generating the conf file'''
-        if intf_list is None:
-            intf_list = cls.relay_interfaces
-        ##stop dhcpd if already running
-        cls.dhcpd_stop()
-        dhcp_conf = cls.dhcpd_conf_generate(config = config, options = options,
-                                            subnet = subnet)
-        ##first touch dhcpd.leases if it doesn't exist
-        lease_file = '{}/dhcpd.leases'.format(cls.dhcp_data_dir)
-        if os.access(lease_file, os.F_OK) is False:
-            with open(lease_file, 'w') as fd: pass
-
-        conf_file = '{}/dhcpd.conf'.format(cls.dhcp_data_dir)
-        with open(conf_file, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        #now configure the dhcpd interfaces for various subnets
-        index = 0
-        intf_info = []
-        for ip,_ in subnet:
-            intf = intf_list[index]
-            mac = cls.get_mac(intf)
-            intf_info.append((ip, mac))
-            index += 1
-            os.system('ifconfig {} {}'.format(intf, ip))
-
-        intf_str = ','.join(intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format(conf_file, lease_file, intf_str)
-        log_test.info('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        ret = os.system(dhcpd_cmd)
-        assert_equal(ret, 0)
-        time.sleep(3)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        cls.relay_interfaces = intf_list
-        cls.onos_dhcp_relay_load(*intf_info[0])
-
-    @classmethod
-    def dhcpd_stop(cls):
-        os.system('pkill -9 dhcpd')
-        for intf in cls.relay_interfaces:
-            os.system('ifconfig {} 0'.format(intf))
-
-        cls.relay_interfaces = cls.relay_interfaces_last
-
-    @classmethod
-    def get_mac(cls, iface):
-        if cls.interface_to_mac_map.has_key(iface):
-            return cls.interface_to_mac_map[iface]
-        mac = get_mac(iface, pad = 0)
-        cls.interface_to_mac_map[iface] = mac
-        return mac
-
-    def stats(self,success_rate = False, only_discover = False, iface = 'veth0'):
-
-	self.ip_count = 0
-	self.failure_count = 0
-	self.start_time = 0
-	self.diff = 0
-	self.transaction_count = 0
-
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-	self.start_time = time.time()
-
-	while self.diff <= 60:
-
-	    if only_discover:
-		cip, sip, mac, _ = self.dhcp.only_discover(multiple = True)
-                log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                        (cip, sip, mac))
-	    else:
-	        cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
-
-	    if cip:
-                self.ip_count +=1
-	    elif cip == None:
-		self.failure_count += 1
-                log_test.info('Failed to get ip')
-		if success_rate and self.ip_count > 0:
-			break
-
-	    self.diff = round(time.time() - self.start_time, 0)
-
-	self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-        self.transactions += (self.ip_count+self.failure_count)
-	self.running_time += self.diff
-        self.total_success += self.ip_count
-	self.total_failure += self.failure_count
-
-    def send_recv(self, mac=None, update_seed = False, validate = True):
-        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
-        if validate:
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                (cip, sip, self.dhcp.get_mac(cip)[0]))
-        return cip,sip
-
-    def test_dhcpRelay_1request(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        self.send_recv(mac=mac)
-
-    def test_dhcpRelay_1request_with_invalid_source_mac_broadcast(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
-        assert_equal(cip,None)
-	log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-
-    def test_dhcpRelay_1request_with_invalid_source_mac_multicast(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:01:98:05')
-        assert_equal(cip,None)
-	log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-
-    def test_dhcpRelay_1request_with_invalid_source_mac_zero(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
-        assert_equal(cip,None)
-        log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-
-    def test_dhcpRelay_Nrequest(self, iface = 'veth0',requests=10):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '192.169.1.1', iface = iface)
-        ip_map = {}
-        for i in range(requests):
-            #mac = RandMAC()._fix()
-	    #log_test.info('mac is %s'%mac)
-            cip, sip = self.send_recv(update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-	    time.sleep(1)
-
-    def test_dhcpRelay_1release(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        assert_equal(self.dhcp.release(cip), True)
-        log_test.info('Triggering DHCP discover again after release')
-        cip2, sip2 = self.send_recv(mac=mac)
-        log_test.info('Verifying released IP was given back on rediscover')
-        assert_equal(cip, cip2)
-        log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-        assert_equal(self.dhcp.release(cip2), True)
-
-    def test_dhcpRelay_Nrelease(self, iface = 'veth0'):
-        mac = None
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
-        ip_map = {}
-        for i in range(10):
-            cip, sip = self.send_recv(mac=mac, update_seed = True)
-            if ip_map.has_key(cip):
-                log_test.info('IP %s given out multiple times' %cip)
-                assert_equal(False, ip_map.has_key(cip))
-            ip_map[cip] = sip
-
-        for ip in ip_map.keys():
-            log_test.info('Releasing IP %s' %ip)
-            assert_equal(self.dhcp.release(ip), True)
-
-        ip_map2 = {}
-        log_test.info('Triggering DHCP discover again after release')
-        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
-        for i in range(len(ip_map.keys())):
-            cip, sip = self.send_recv(mac=mac, update_seed = True)
-            ip_map2[cip] = sip
-
-        log_test.info('Verifying released IPs were given back on rediscover')
-        if ip_map != ip_map2:
-            log_test.info('Map before release %s' %ip_map)
-            log_test.info('Map after release %s' %ip_map2)
-        assert_equal(ip_map, ip_map2)
-
-    def test_dhcpRelay_starvation(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-        log_test.info('Verifying 1 ')
-	count = 0
-        while True:
-            #mac = RandMAC()._fix()
-            cip, sip = self.send_recv(update_seed = True,validate = False)
-	    if cip is None:
-		break
-	    else:
-		count += 1
-	assert_equal(count,91)
-        log_test.info('Verifying 2 ')
-        cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
-        assert_equal(cip, None)
-        assert_equal(sip, None)
-
-    def test_dhcpRelay_same_client_multiple_discover(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	log_test.info('Triggering DHCP discover again.')
-	new_cip, new_sip, new_mac, _ = self.dhcp.only_discover()
-	assert_equal(new_cip, cip)
-	log_test.info('got same ip to smae the client when sent discover again, as expected')
-
-    def test_dhcpRelay_same_client_multiple_request(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	log_test.info('Sending DHCP discover and DHCP request.')
-	cip, sip = self.send_recv(mac=mac)
-	mac = self.dhcp.get_mac(cip)[0]
-	log_test.info("Sending DHCP request again.")
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info('got same ip to smae the client when sent request again, as expected')
-
-    def test_dhcpRelay_client_desired_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '192.168.1.31', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-	assert_equal(cip,self.dhcp.seed_ip)
-	log_test.info('Got dhcp client desired IP %s from server %s for mac %s as expected' %
-		  (cip, sip, mac) )
-
-    def test_dhcpRelay_client_desired_address_out_of_pool(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-	assert_not_equal(cip,None)
-	assert_not_equal(cip,self.dhcp.seed_ip)
-	log_test.info('server offered IP from its pool when requested out of pool IP, as expected')
-
-    def test_dhcpRelay_nak_packet(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-	assert_equal(new_cip, None)
-	log_test.info('server sent NAK packet when requested other IP than that server offered')
-
-
-    def test_dhcpRelay_client_requests_specific_lease_time_in_discover(self, iface = 'veth0',lease_time=700):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.70', iface = iface)
-	self.dhcp.return_option = 'lease'
-	cip, sip, mac, lval = self.dhcp.only_discover(lease_time=True,lease_value=lease_time)
-	assert_equal(lval, lease_time)
-	log_test.info('dhcp server offered IP address with client requested lease time')
-
-    def test_dhcpRelay_client_request_after_reboot(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	log_test.info('client rebooting...')
-	os.system('ifconfig '+iface+' down')
-	time.sleep(5)
-	os.system('ifconfig '+iface+' up')
-	new_cip2, new_sip = self.dhcp.only_request(cip, mac, cl_reboot = True)
-	assert_equal(new_cip2, cip)
-	log_test.info('client got same IP after reboot, as expected')
-
-
-    def test_dhcpRelay_after_server_reboot(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip, None)
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	log_test.info('server rebooting...')
-	self.tearDownClass()
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip,None)
-	self.setUpClass()
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info('client got same IP after server rebooted, as expected')
-
-
-    def test_dhcpRelay_specific_lease_time_only_in_discover_but_not_in_request_packet(self, iface = 'veth0',lease_time=700):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	self.dhcp.return_option = 'lease'
-	log_test.info('Sending DHCP discover with lease time of 700')
-	cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True, lease_value=lease_time)
-	assert_equal(lval,lease_time)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True)
-	assert_equal(new_cip,cip)
-	assert_not_equal(lval, lease_time) #Negative Test Case
-	log_test.info('client requested lease time in discover packer is not seen in server ACK packet as expected')
-
-    def test_dhcpRelay_specific_lease_time_only_in_request_but_not_in_discover_packet(self, iface = 'veth0',lease_time=800):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True,lease_value=lease_time)
-	assert_equal(new_cip,cip)
-	assert_equal(lval, lease_time)
-	log_test.info('client requested lease time in request packet seen in servre replied ACK packet as expected')
-
-    def test_dhcpRelay_client_renew_time(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-	new_options = [('dhcp-renewal-time', 100), ('dhcp-rebinding-time', 125)]
-        options = self.default_options + new_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-	log_test.info('waiting for  renew  time..')
-	time.sleep(lval)
-	latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-	assert_equal(latest_cip, cip)
-	log_test.info('server renewed client IP when client sends request after renew time, as expected')
-
-
-    def test_dhcpRelay_client_rebind_time(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-	new_options = [('dhcp-renewal-time', 100), ('dhcp-rebinding-time', 125)]
-        options = self.default_options + new_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-	log_test.info('waiting for  rebind  time..')
-	time.sleep(lval)
-	latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-	assert_equal(latest_cip, cip)
-        log_test.info('server renewed client IP when client sends request after rebind time, as expected')
-
-
-    def test_dhcpRelay_client_expected_subnet_mask(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_subnet = '255.255.255.0'
-	self.dhcp.return_option = 'subnet'
-
-	cip, sip, mac, subnet_mask = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(subnet_mask,expected_subnet)
-	log_test.info('subnet mask in server offer packet is same as configured subnet mask in dhcp server')
-
-
-    def test_dhcpRelay_client_sends_dhcp_request_with_wrong_subnet_mask(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'subnet'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Subnet Mask in DHCP Request.")
-
-
-    def test_dhcpRelay_client_expected_router_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        config = self.default_config
-	new_options = [('routers', '20.20.20.1')]
-        options = self.default_options + new_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_router_address = '20.20.20.1'
-	self.dhcp.return_option = 'router'
-
-	cip, sip, mac, router_address_value = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_router_address, router_address_value)
-	log_test.info('router address in server offer packet is same as configured router address in dhcp server')
-
-
-    def test_dhcpRelay_client_sends_dhcp_request_with_wrong_router_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'router'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Router Address in DHCP Request.")
-
-
-    def test_dhcpRelay_client_expected_broadcast_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_broadcast_address = '192.168.1.255'
-	self.dhcp.return_option = 'broadcast_address'
-
-	cip, sip, mac, broadcast_address_value = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_broadcast_address, broadcast_address_value)
-	log_test.info('broadcast address in server offer packet is same as configured broadcast address in dhcp server')
-
-    def test_dhcpRelay_client_sends_dhcp_request_with_wrong_broadcast_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'broadcast_address'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong Broadcast Address in DHCP Request.")
-
-
-    def test_dhcpRelay_client_expected_dns_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-	expected_dns_address = '192.168.1.1'
-	self.dhcp.return_option = 'dns'
-
-	cip, sip, mac, dns_address_value = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_equal(expected_dns_address, dns_address_value)
-	log_test.info('dns address in server offer packet is same as configured dns address in dhcp server')
-
-    def test_dhcpRelay_client_sends_request_with_wrong_dns_address(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-		  (cip, sip, mac) )
-	assert_not_equal(cip,None)
-	self.dhcp.send_different_option = 'dns'
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	assert_equal(new_cip, cip)
-	log_test.info("Got DHCP Ack despite of specifying wrong DNS Address in DHCP Request.")
-
-
-    def test_dhcpRelay_transactions_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.stats()
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
-
-	log_test.info("Final Statistics for total transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
-
-    def test_dhcpRelay_consecutive_successes_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.stats(success_rate = True)
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-
-    def test_dhcpRelay_clients_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.stats(only_discover = True)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d of sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of clients per second in run %d:%f                                      "
-		    %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of clients per second: %d                                        ",
-		round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpRelay_consecutive_successful_clients_per_second(self, iface = 'veth0'):
-
-	for i in range(1,4):
-	    self.stats(success_rate = True, only_discover = True)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d for sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful clients per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful clients per second: %d", round(self.total_success/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpRelay_concurrent_transactions_per_second(self, iface = 'veth0'):
-
-        config = self.default_config
-        options = self.default_options
-        subnet =  [ ('192.168.1.2',
-'''
-subnet 192.168.0.0 netmask 255.255.0.0 {
-    range 192.168.1.10 192.168.2.100;
-}
-'''), ]
-
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-	for key in (key for key in g_subscriber_port_map if key < 100):
-	    self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i):
-	    mac = self.get_mac('veth{}'.format(i))
-	    cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
-	    log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-	    self.lock.acquire()
-
-	    if cip:
-		    self.ip_count += 1
-
-	    elif cip is None:
-		    self.failure_count += 1
-
-	    self.lock.notify_all()
-	    self.lock.release()
-
-	for i in range (1,4):
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while self.diff <= 60:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-
-	    self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d"
-			    %(self.ip_count+self.failure_count,self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Final Statistics for total transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpRelay_concurrent_consecutive_successes_per_second(self, iface = 'veth0'):
-
-        config = self.default_config
-        options = self.default_options
-        subnet =  [ ('192.168.1.2',
-'''
-subnet 192.168.0.0 netmask 255.255.0.0 {
-    range 192.168.1.10 192.168.2.100;
-}
-'''), ]
-
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-	failure_dir = {}
-
-	for key in (key for key in g_subscriber_port_map if key != 100):
-	    self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i, j):
-#		log_test.info("Thread Name:%s",current_thread().name)
-#		failure_dir[current_thread().name] = True
-	    while failure_dir.has_key(current_thread().name) is False:
-		  mac = RandMAC()._fix()
-		  cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
-		  i += 2
-		  log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-		  self.lock.acquire()
-
-		  if cip:
-		     self.ip_count += 1
-		     self.lock.notify_all()
-		     self.lock.release()
-		  elif cip is None:
-		     self.failure_count += 1
-		     failure_dir[current_thread().name] = True
-		     self.lock.notify_all()
-		     self.lock.release()
-		     break
-#		self.lock.notify_all()
-#		self.lock.release()
-
-	for i in range (1,4):
-	    failure_dir = {}
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while len(failure_dir) != 6:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-	    self.transaction_count = round((self.ip_count)/self.diff, 2)
-
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-
-	    log_test.info("Statistics for run %d",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of consecutive successful transactions          Running Time ")
-	    log_test.info("                   %d                                   %d        " %(self.ip_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total successful transactions")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of consecutive successes         Running Time ")
-	log_test.info("    %d                                 %d                             %d        " %(self.transactions,
-                 self.total_success, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,2))
-	log_test.info("----------------------------------------------------------------------------------")
-
-    def test_dhcpRelay_concurrent_clients_per_second(self, iface = 'veth0'):
-
-        config = self.default_config
-        options = self.default_options
-        subnet =  [ ('192.168.1.2',
-'''
-subnet 192.168.0.0 netmask 255.255.0.0 {
-    range 192.168.1.10 192.168.2.100;
-}
-'''), ]
-
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-	for key in (key for key in g_subscriber_port_map if key < 100):
-		self.host_load(g_subscriber_port_map[key])
-
-	def thread_fun(i):
-#		mac = self.get_mac('veth{}'.format(i))
-	    cip, sip, mac, _ = DHCPTest(iface = 'veth{}'.format(i)).only_discover(mac = RandMAC()._fix())
-	    log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
-	    self.lock.acquire()
-
-	    if cip:
-	       self.ip_count += 1
-	    elif cip is None:
-	       self.failure_count += 1
-
-	    self.lock.notify_all()
-	    self.lock.release()
-
-	for i in range (1,4):
-	    self.ip_count = 0
-	    self.failure_count = 0
-	    self.start_time = 0
-	    self.diff = 0
-	    self.transaction_count = 0
-	    self.start_time = time.time()
-
-	    while self.diff <= 60:
-		  t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
-		  t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
-		  t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
-		  t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
-		  t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
-		  t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
-
-		  t.start()
-		  t1.start()
-		  t2.start()
-		  t3.start()
-		  t4.start()
-		  t5.start()
-
-		  t.join()
-		  t1.join()
-		  t2.join()
-		  t3.join()
-		  t4.join()
-		  t5.join()
-
-		  self.diff = round(time.time() - self.start_time, 0)
-	    self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-	    self.transactions += (self.ip_count+self.failure_count)
-	    self.running_time += self.diff
-	    self.total_success += self.ip_count
-	    self.total_failure += self.failure_count
-
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("Statistics for run %d of sending only DHCP Discover",i)
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of transactions     No. of successes     No. of failures     Running Time ")
-	    log_test.info("    %d                    %d                     %d                  %d" %(self.ip_count+self.failure_count, 		               self.ip_count, self.failure_count, self.diff))
-	    log_test.info("----------------------------------------------------------------------------------")
-	    log_test.info("No. of clients per second in run %d:%f                                      "
-		    %(i, self.transaction_count))
-	    log_test.info("----------------------------------------------------------------------------------")
-
-	log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Total transactions     Total No. of successes     Total No. of failures     Running Time ")
-	log_test.info("    %d                     %d                         %d                        %d" %(self.transactions,
-                 self.total_success, self.total_failure, self.running_time))
-	log_test.info("----------------------------------------------------------------------------------")
-	log_test.info("Average no. of clients per second: %d                                        ",
-		round(self.transactions/self.running_time,0))
-	log_test.info("----------------------------------------------------------------------------------")
-
-
-    def test_dhcpRelay_client_conflict(self, iface = 'veth0'):
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-	cip, sip, mac, _ = self.dhcp.only_discover()
-	log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
-		  (cip, sip, mac) )
-        self.dhcp1 = DHCPTest(seed_ip = cip, iface = iface)
-	new_cip, new_sip, new_mac, _ = self.dhcp1.only_discover(desired = True)
-	new_cip, new_sip = self.dhcp1.only_request(new_cip, new_mac)
-	log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
-		  (new_cip, new_sip, new_mac) )
-	log_test.info("IP %s alredy consumed by mac %s." % (new_cip, new_mac))
-	log_test.info("Now sending DHCP Request for old DHCP discover.")
-	new_cip, new_sip = self.dhcp.only_request(cip, mac)
-	if new_cip is None:
-	   log_test.info('Got dhcp client IP %s from server %s for mac %s.Which is expected behavior.'
-                    %(new_cip, new_sip, new_mac) )
-	elif new_cip:
-	   log_test.info('Got dhcp client IP %s from server %s for mac %s.Which is not expected behavior as IP %s is already consumed.'
-		    %(new_cip, new_sip, new_mac, new_cip) )
-	   assert_equal(new_cip, None)
diff --git a/src/test/fabric/__init__.py b/src/test/fabric/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/fabric/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/fabric/fabricTest.py b/src/test/fabric/fabricTest.py
deleted file mode 100644
index 461f961..0000000
--- a/src/test/fabric/fabricTest.py
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from TestManifest import TestManifest
-from Fabric import FabricMAAS, Fabric
-from CordTestUtils import log_test as log
-import os
-log.setLevel('INFO')
-
-class fabric_exchange(unittest.TestCase):
-
-    node_list = []
-    fabric = None
-    FABRIC_TEST_TIMEOUT = 30
-    key_file = os.getenv('SSH_KEY_FILE', None)
-    api_key = os.getenv('MAAS_API_KEY', 'UNKNOWN')
-
-    @classmethod
-    def setUpClass(cls):
-        if cls.api_key == 'UNKNOWN':
-            return
-        maas = FabricMAAS(api_key = cls.api_key)
-        cls.node_list = maas.get_node_list()
-        cls.fabric = Fabric(cls.node_list, key_file = cls.key_file, verbose = False)
-
-    @deferred(FABRIC_TEST_TIMEOUT)
-    def test_fabric(self):
-        """Test the connectivity between the compute nodes"""
-        df = defer.Deferred()
-        def verify_fabric(df):
-            assert_not_equal(self.fabric, None)
-            failed_nodes = []
-            failed_nodes = self.fabric.ping_neighbors()
-            if failed_nodes:
-                log.info('Failed nodes: %s' %failed_nodes)
-                for node, neighbor, _ in failed_nodes:
-                    log.info('Ping from node %s to neighbor %s Failed' %(node, neighbor))
-            assert_equal(len(failed_nodes), 0)
-            df.callback(0)
-        reactor.callLater(0, verify_fabric, df)
-        return df
diff --git a/src/test/flows/__init__.py b/src/test/flows/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/flows/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/flows/flowsTest.py b/src/test/flows/flowsTest.py
deleted file mode 100644
index fa22b72..0000000
--- a/src/test/flows/flowsTest.py
+++ /dev/null
@@ -1,2929 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from scapy.all import *
-import time
-import json
-import threading
-import os
-from OnosCtrl import OnosCtrl
-from OnosFlowCtrl import OnosFlowCtrl
-from OltConfig import OltConfig
-from CordLogger import CordLogger
-from CordTestUtils import log_test
-import random
-from threading import current_thread
-import collections
-log_test.setLevel('INFO')
-
-class flows_exchange(CordLogger):
-
-    #Use the first available device id as our device id to program flows
-    app = 'org.onosproject.cli'
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    INTF_TX_DEFAULT = 'veth2'
-    INTF_RX_DEFAULT = 'veth0'
-    default_port_map = {
-        PORT_TX_DEFAULT : INTF_TX_DEFAULT,
-        PORT_RX_DEFAULT : INTF_RX_DEFAULT,
-        INTF_TX_DEFAULT : PORT_TX_DEFAULT,
-        INTF_RX_DEFAULT : PORT_RX_DEFAULT
-        }
-
-    def incmac(self, mac):
-	tmp =  str(hex(int('0x'+mac,16)+1).split('x')[1])
-	mac = '0'+ tmp if len(tmp) < 2 else tmp
-	return mac
-
-    def next_mac(self, mac):
-        mac = mac.split(":")
-        mac[5] = self.incmac(mac[5])
-
-        if len(mac[5]) > 2:
-	   mac[0] = self.incmac(mac[0])
-	   mac[5] = '01'
-
-        if len(mac[0]) > 2:
-	   mac[0] = '01'
-	   mac[1] = self.incmac(mac[1])
-	   mac[5] = '01'
-        return ':'.join(mac)
-
-    def to_egress_mac(cls, mac):
-        mac = mac.split(":")
-        mac[4] = '01'
-
-        return ':'.join(mac)
-
-    def inc_ip(self, ip, i):
-
-        ip[i] =str(int(ip[i])+1)
-        return '.'.join(ip)
-
-
-    def next_ip(self, ip):
-
-        lst = ip.split('.')
-        for i in (3,0,-1):
-	    if int(lst[i]) < 255:
-	       return self.inc_ip(lst, i)
-	    elif int(lst[i]) == 255:
-	       lst[i] = '0'
-	       if int(lst[i-1]) < 255:
-		  return self.inc_ip(lst,i-1)
-	       elif int(lst[i-2]) < 255:
-		  lst[i-1] = '0'
-		  return self.inc_ip(lst,i-2)
-	       else:
-		  break
-
-    def to_egress_ip(self, ip):
-        lst=ip.split('.')
-        lst[0] = '182'
-        return '.'.join(lst)
-
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = cls.default_port_map
-        cls.device_id = OnosCtrl.get_device_id()
-        num_ports = len(cls.port_map['ports'] + cls.port_map['relay_ports'])
-        cls.port_offset = int(os.getenv('TEST_INSTANCE', 0)) * num_ports
-
-    def test_flow_mac(self):
-        '''Test Add and verify flows with MAC selectors'''
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:00:01'
-        ingress_mac = '00:00:00:00:00:02'
-
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ethSrc = ingress_mac,
-                            ethDst = egress_mac)
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-                self.success = True
-            sniff(count=2, timeout=5, lfilter = lambda p: p.src == ingress_mac,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        pkt = Ether(src = ingress_mac, dst = egress_mac)/IP()
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ip(self):
-        '''Test Add and verify flows with IPv4 selectors'''
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ethType = '0x0800',
-                            ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
-                            ipDst = ('IPV4_DST', egress_map['ip']+'/32')
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip'],
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-
-    def test_flow_tcp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': 9500 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': 9000 }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            tcpSrc = ingress_map['tcp_port'],
-                            tcpDst = egress_map['tcp_port']
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-                self.success = True
-            sniff(count=2, timeout=5, lfilter = lambda p: TCP in p and p[TCP].dport == egress_map['tcp_port']
-			and p[TCP].sport == ingress_map['tcp_port'], prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        L4 = TCP(sport = ingress_map['tcp_port'], dport = egress_map['tcp_port'])
-        pkt = L2/L3/L4
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_udp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': 9500 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': 9000 }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            udpSrc = ingress_map['udp_port'],
-                            udpDst = egress_map['udp_port']
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-                self.success = True
-            sniff(count=2, timeout=5,
-             lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
-				and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
-        pkt = L2/L3/L4
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_flow_vlan(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:00:01'
-        ingress_mac = '00:00:00:00:00:02'
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ethSrc = ingress_mac,
-                            ethDst = egress_mac,
-			    vlan = 0x10)
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-                log_test.info('Pkt:%s', pkt.show())
-                self.success = True
-            sniff(count=2, timeout=5, lfilter = lambda p:p.src == ingress_mac,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        pkt = Ether(src = ingress_mac, dst = egress_mac)/Dot1Q(vlan = 0x10)/IP()
-	log_test.info("Sending Packet:%s",pkt.show())
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ipv6(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ethType = '0x86dd',
-                            ipSrc = ('IPV6_SRC', ingress_map['ipv6'] + '/48'),
-                            ipDst = ('IPV6_DST', egress_map['ipv6'] + '/48')
-                            )
-
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IPv6].src, pkt[IPv6].dst))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IPv6 in p and p[IPv6].dst == egress_map['ipv6'] and p[IPv6].src == ingress_map['ipv6'],
-                 prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'])
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ipv6_flow_label(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ipv6flow_label = 25
-                            )
-
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s with flow label %s' %(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].fl))
-                self.success = True
-            sniff(count=2, timeout=5, lfilter = lambda p: IPv6 in p and p[IPv6].dst == egress_map['ipv6']
-		and p[IPv6].src == ingress_map['ipv6'] and p[IPv6].fl == 25, prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], fl = 25)
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ipv6_extension_header(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ipv6_extension = 0,
-                            )
-
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s' %(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ipv6_available_extension_headers(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-	for i in [0, 60, 43, 44, 51, 50, 135]:
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ipv6_extension = i,
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s' %(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
-                self.success = True
-            sniff(count=2, timeout=5, lfilter = lambda p: IPv6 in p and p[IPv6].nh == i,
-		    prn = recv_cb, iface = self.port_map[egress])
-
-	for i in [0, 60, 43, 44, 51, 50, 135]:
-	    self.success = False
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = i)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-	    assert_equal(self.success, True)
-
-
-    def test_flow_dscp(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            dscp = 32
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
-			and p[IP].tos == 32,prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 32)
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_available_dscp(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-	dscp = [184, 0, 40, 48, 56, 72, 80, 88, 104, 112, 120, 136, 144, 152, 32, 64, 96, 128, 160, 192, 224]
-	for i in dscp:
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            dscp = i
-	                            )
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
-			and p[IP].tos == i,prn = recv_cb, iface = self.port_map[egress])
-
-	for i in dscp:
-	        self.success = False
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = i)
-	        pkt = L2/L3
-	        log_test.info('Sending a packet to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-	        assert_equal(self.success, True)
-
-    def test_flow_ecn(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ecn = 1
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
-			and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
-				iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-
-    def test_flow_available_ecn(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-	for i in range(4):
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            ecn = i
-	                            )
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
-			and int(bin(p[IP].tos).split('b')[1][-2:],2) == i,prn = recv_cb,
-				iface = self.port_map[egress])
-
-	for i in range(4):
-	        self.success = False
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = i)
-	        pkt = L2/L3
-	        log_test.info('Sending a packet to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-	        assert_equal(self.success, True)
-
-    def test_flow_available_dscp_and_ecn(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-	dscp = [46, 0, 10, 12, 14, 18, 20, 22, 26, 28, 30, 34, 36, 38, 8, 16, 24, 32, 40, 48, 56]
-	for i in dscp:
-		for j in (0,1,2,3):
-		        flow = OnosFlowCtrl(deviceId = self.device_id,
-		                            egressPort = egress + self.port_offset,
-		                            ingressPort = ingress + self.port_offset,
-		                            dscp = i,
-					    ecn = j
-		                            )
-		        result = flow.addFlow()
-		        assert_equal(result, True)
-		        ##wait for flows to be added to ONOS
-		        time.sleep(1)
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IP in p and p[IP].tos == int(bin(i).split('b')[1]+ bin(j).split('b')[1],2)
-			 ,prn = recv_cb, iface = self.port_map[egress])
-
-	for i in dscp:
-		for j in (0,1,2,3):
-
-			self.success = False
-			t = threading.Thread(target = mac_recv_task)
-			t.start()
-			L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-			L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = int(bin(i).split('b')[1]+ bin(j).split('b')[1],2))
-			pkt = L2/L3
-			log_test.info('Sending packets to verify if flows are correct')
-			sendp(pkt, count=50, iface = self.port_map[ingress])
-			t.join()
-			assert_equal(self.success, True)
-
-    def test_flow_icmp(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            icmpv4_type =  '3',
-                            icmpv4_code =  8
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMP type %s, ICMP code %s' %(pkt[ICMP].type, pkt[ICMP].code))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMP in p and p[ICMP].type == 3 and p[ICMP].code == 8,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])/ICMP(type = 3, code = 8)
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmp_different_types(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-	icmp = {'11': [0, 1], '10': 0, '0': 0, '3': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-		'5': [1, 2, 3], '4': 0, '9': 0, '8': 0}
-	for type,code in icmp.items():
-	    if isinstance(code, list):
-	       for i in code:
-		   flow = OnosFlowCtrl(deviceId = self.device_id,
-				    egressPort = egress + self.port_offset,
-				    ingressPort = ingress + self.port_offset,
-				    icmpv4_type =  type,
-				    icmpv4_code =  i
-				    )
-		   result = flow.addFlow()
-		   assert_equal(result, True)
-		   ##wait for flows to be added to ONOS
-		   time.sleep(1)
-	    else:
-		   flow = OnosFlowCtrl(deviceId = self.device_id,
-				    egressPort = egress + self.port_offset,
-				    ingressPort = ingress + self.port_offset,
-				    icmpv4_type =  type,
-				    icmpv4_code =  code
-				    )
-		   result = flow.addFlow()
-		   assert_equal(result, True)
-		   ##wait for flows to be added to ONOS
-		   time.sleep(1)
-	self.success = False
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMP type %s, ICMP code %s' %(pkt[ICMP].type, pkt[ICMP].code))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMP in p and p[ICMP].type == 3 and p[ICMP].code == 8,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])/ICMP(type = 3, code = 8)
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_EchoRequest(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002'}
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            icmpv6_type =  '128',
-                            icmpv6_code =  0
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6EchoRequest].type, pkt[ICMPv6EchoRequest].code))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6EchoRequest in p and p[ICMPv6EchoRequest].type == 128 and p[ICMPv6EchoRequest].code == 0,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6EchoRequest()
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_EchoReply(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            icmpv6_type =  '129',
-                            icmpv6_code =  0
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6EchoReply].type, pkt[ICMPv6EchoReply].code))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6EchoReply in p and p[ICMPv6EchoReply].type == 129 and p[ICMPv6EchoReply].code == 0,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6EchoReply()
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-
-    def test_flow_icmpv6_DestUnreachable(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-	for i in range(8):
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            icmpv6_type =  '1',
-	                            icmpv6_code =  i
-	                            )
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-	for i in range(8):
-	        self.success = False
-	        def mac_recv_task():
-	            def recv_cb(pkt):
-	                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6DestUnreach].type, pkt[ICMPv6DestUnreach].code))
-	                self.success = True
-	            sniff(count=2, timeout=5,
-	                  lfilter = lambda p: ICMPv6DestUnreach in p and p[ICMPv6DestUnreach].type == 1 and p[ICMPv6DestUnreach].code == i,
-	                  prn = recv_cb, iface = self.port_map[egress])
-
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6DestUnreach(code = i)
-	        pkt = L2/L3
-	        log_test.info('Sending packets to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-	        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_PacketTooBig(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            icmpv6_type =  '2',
-                            icmpv6_code =  0
-                            )
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6PacketTooBig].type, pkt[ICMPv6PacketTooBig].code))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6PacketTooBig in p and p[ICMPv6PacketTooBig].type == 2 and p[ICMPv6PacketTooBig].code == 0,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6PacketTooBig()
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_TimeExceeded(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-	for i in range(2):
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            icmpv6_type =  '3',
-	                            icmpv6_code =  i
-	                            )
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-	for i in range(2):
-	        self.success = False
-	        def mac_recv_task():
-	            def recv_cb(pkt):
-	                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6TimeExceeded].type, pkt[ICMPv6TimeExceeded].code))
-	                self.success = True
-	            sniff(count=2, timeout=5,
-	                  lfilter = lambda p: ICMPv6TimeExceeded in p and p[ICMPv6TimeExceeded].type == 3 and p[ICMPv6TimeExceeded].code == i,
-	                  prn = recv_cb, iface = self.port_map[egress])
-
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6TimeExceeded(code = i)
-	        pkt = L2/L3
-	        log_test.info('Sending packets to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-	        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_ParameterProblem(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-	for i in range(3):
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            icmpv6_type =  '4',
-	                            icmpv6_code =  i
-	                            )
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-	for i in range(3):
-	        self.success = False
-	        def mac_recv_task():
-	            def recv_cb(pkt):
-	                log_test.info('Pkt seen with ICMPv6 type %s, ICMPv6 code %s' %(pkt[ICMPv6ParamProblem].type, pkt[ICMPv6ParamProblem].code))
-	                self.success = True
-	            sniff(count=2, timeout=5,
-	                  lfilter = lambda p: ICMPv6ParamProblem in p and p[ICMPv6ParamProblem].type == 4 and p[ICMPv6ParamProblem].code == i,
-	                  prn = recv_cb, iface = self.port_map[egress])
-
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6ParamProblem(code = i)
-	        pkt = L2/L3
-	        log_test.info('Sending packets to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-	        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_ND_Target_address(self):
-        egress = 1
-        ingress = 2
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002'}
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ipv6_target =  '2001:db8:a0b:12f0:1010:1010:1010:1001')
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 Neighbor Discovery type %s, target address %s' %(pkt[ICMPv6ND_NS].type, pkt[ICMPv6ND_NS].tgt))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6ND_NS in p and p[ICMPv6ND_NS].tgt == '2001:db8:a0b:12f0:1010:1010:1010:1001',
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'])/ICMPv6ND_NS(tgt = '2001:db8:a0b:12f0:1010:1010:1010:1001')
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_ND_SLL(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002'}
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ipv6_sll =   ingress_map['ether'])
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 Neighbor Discovery type %s, Source Link Layer address %s' %(pkt[ICMPv6ND_NS].type, pkt[ICMPv6NDOptSrcLLAddr].lladdr))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6NDOptSrcLLAddr in p and p[ICMPv6NDOptSrcLLAddr].lladdr == ingress_map['ether'],
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'])#, dst = ingress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6ND_NS(tgt =  egress_map['ipv6'])/ICMPv6NDOptSrcLLAddr(lladdr = ingress_map['ether'])
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_icmpv6_NA_TLL(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001'}
-        ingress_map = { 'ether': '00:00:00:00:00:04','ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002'}
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ipv6_tll =   egress_map['ether'])
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ICMPv6 Neighbor Advertisement type %s, Target Link Layer address %s' %(pkt[ICMPv6ND_NA].type, pkt[ICMPv6NDOptDstLLAddr].lladdr))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: ICMPv6NDOptDstLLAddr in p and p[ICMPv6NDOptDstLLAddr].lladdr == ingress_map['ether'],
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'])#, dst = ingress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'], dst = egress_map['ipv6'])/ICMPv6ND_NA(tgt =  ingress_map['ipv6'])/ICMPv6NDOptDstLLAddr(lladdr = ingress_map['ether'])
-        pkt = L2/L3
-        log_test.info('Sending packets to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_flow_ipv6_and_icmpv6(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-        flow = OnosFlowCtrl(deviceId = self.device_id,
-                            egressPort = egress + self.port_offset,
-                            ingressPort = ingress + self.port_offset,
-                            ethType = '0x86dd',
-                            ipSrc = ('IPV6_SRC', ingress_map['ipv6'] + '/48'),
-                            ipDst = ('IPV6_DST', egress_map['ipv6'] + '/48'),
-			    icmpv6_type =  '128',
-                            icmpv6_code =  0
-                            )
-
-        result = flow.addFlow()
-        assert_equal(result, True)
-        ##wait for flows to be added to ONOS
-        time.sleep(1)
-        self.success = False
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IPv6].src, pkt[IPv6].dst))
-                self.success = True
-            sniff(count=2, timeout=5,
-                  lfilter = lambda p: IPv6 in p and p[IPv6].dst == egress_map['ipv6'] and p[IPv6].src == ingress_map['ipv6']
-			and p[ICMPv6EchoRequest].type == 128 and p[ICMPv6EchoRequest].code == 0, prn = recv_cb, iface = self.port_map[egress])
-
-        t = threading.Thread(target = mac_recv_task)
-        t.start()
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'])/ICMPv6EchoRequest()
-        pkt = L2/L3
-        log_test.info('Sending a packet to verify if flows are correct')
-        sendp(pkt, count=50, iface = self.port_map[ingress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def test_5_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-
-
-	for i in range(0,5):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = False
-
-	def mac_recv_task():
-	    def recv_cb(pkt):
-		log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		self.success = True
-	    sniff(count=2, timeout=5, lfilter = lambda p: p.src == '00:00:00:00:00:02',
-		    prn = recv_cb, iface = self.port_map[egress])
-
-	t = threading.Thread(target = mac_recv_task)
-	t.start()
-	pkt = Ether(src = '00:00:00:00:00:02', dst = egress_mac)/IP()
-	log_test.info('Sending packets to verify if flows are correct')
-	sendp(pkt, count=50, iface = self.port_map[ingress])
-	t.join()
-        assert_equal(self.success, True)
-
-
-    def test_500_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,500):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-	self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-		t = threading.Thread(target = mac_recv_task)
-		t.start()
-		pkt = Ether(src = random_src, dst = egress_mac)/IP()
-		log_test.info('Sending packets to verify if flows are correct')
-		sendp(pkt, count=50, iface = self.port_map[ingress])
-		t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-	t2 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:' + hex(random.randrange(50,254)).split('x')[1])
-	t3 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(16,100)).split('x')[1])
-	t4 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(101,240)).split('x')[1])
-	t5 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:f5')
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-
-	if len(success_dir) != 5:
-		self.success = False
-
-        assert_equal(self.success, True)
-
-
-    def test_1k_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,1000):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = egress_mac)/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:' + hex(random.randrange(50,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:09')
-        t4 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(16,150)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(151,250)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:' +  hex(random.randrange(16,150)).split('x')[1])
-        t8 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:' +  hex(random.randrange(151,250)).split('x')[1])
-        t9 = threading.Thread(target = verify_flow, args = '03:00:00:00:00:'+ hex(random.randrange(16,175)).split('x')[1])
-        t10 = threading.Thread(target = verify_flow, args = '03:00:00:00:00:eb')
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-
-    @nottest
-    def test_10k_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-
-	for i in range(0,10000):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = egress_mac)/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,21)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(17,21)).split('x')[1] +':00:00:00:00:' +
-							hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(22,30)).split('x')[1] +':00:00:00:00:' +
-							hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(31,38)).split('x')[1] +':00:00:00:00:' +
-							hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '27:00:00:00:00:37')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_100k_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-
-	for i in range(0,100000):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = egress_mac)/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,41)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(42,72)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(73,100)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(101,136)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '89:01:00:00:00:28')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-
-    @nottest
-    def test_1000k_flow_constant_dst_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:01'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-
-	for i in range(0,1000000):
-	    ingress_mac = self.next_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = egress_mac)/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,21)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(22,50)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(51,75)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(76,95)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '60:0f:00:00:00:91')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_5_flow_constant_src_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:01'
-
-
-	for i in range(0,5):
-	    egress_mac = self.next_mac(egress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = False
-
-	def mac_recv_task():
-	    def recv_cb(pkt):
-		log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		self.success = True
-	    sniff(count=2, timeout=5, lfilter = lambda p: p.src == '00:00:00:00:00:01' and p.dst == '00:00:00:00:01:02',
-		    prn = recv_cb, iface = self.port_map[egress])
-
-	t = threading.Thread(target = mac_recv_task)
-	t.start()
-	pkt = Ether(src = ingress_mac, dst =  '00:00:00:00:01:02')/IP()
-	log_test.info('Sending packets to verify if flows are correct')
-	sendp(pkt, count=50, iface = self.port_map[ingress])
-	t.join()
-        assert_equal(self.success, True)
-
-    def test_500_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,500):
-	    ingress_mac = self.next_mac(ingress_mac)
-	    egress_mac = self.to_egress_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst =  self.to_egress_mac(random_src))/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:' + hex(random.randrange(50,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(16,100)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(101,240)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:f5')
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-	if len(success_dir) != 5:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_1k_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,1000):
-	    ingress_mac = self.next_mac(ingress_mac)
-	    egress_mac = self.to_egress_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-            pkt = Ether(src = random_src, dst =  self.to_egress_mac(random_src))/IP()
-            log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:' + hex(random.randrange(50,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:09')
-        t4 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(16,150)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' +  hex(random.randrange(151,250)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:' +  hex(random.randrange(16,150)).split('x')[1])
-        t8 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:' +  hex(random.randrange(151,250)).split('x')[1])
-        t9 = threading.Thread(target = verify_flow, args = '03:00:00:00:00:'+ hex(random.randrange(16,175)).split('x')[1])
-        t10 = threading.Thread(target = verify_flow, args = '03:00:00:00:00:eb')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_10k_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,10000):
-	    ingress_mac = self.next_mac(ingress_mac)
-	    egress_mac = self.to_egress_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = self.to_egress_mac(random_src))/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,21)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(17,21)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(22,30)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(31,38)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '27:00:00:00:00:37')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_100k_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,100000):
-	    ingress_mac = self.next_mac(ingress_mac)
-	    egress_mac = self.to_egress_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = self.to_egress_mac(random_src))/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,41)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(42,72)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(73,100)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(101,136)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '89:01:00:00:00:28')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_1000k_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	success_dir = {}
-
-	for i in range(0,1000000):
-	    ingress_mac = self.next_mac(ingress_mac)
-	    egress_mac = self.to_egress_mac(ingress_mac)
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-			egressPort = egress + self.port_offset,
-			ingressPort = ingress + self.port_offset,
-			ethSrc = ingress_mac,
-			ethDst = egress_mac)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-        def verify_flow(*r):
-	    random_src = ''.join(r)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5, lfilter = lambda p: p.src == random_src,
-			prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = Ether(src = random_src, dst = egress_mac)/IP()
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-        t1 = threading.Thread(target = verify_flow, args = '00:00:00:00:00:01')
-        t2 = threading.Thread(target = verify_flow, args = '01:00:00:00:00:' + hex(random.randrange(16,254)).split('x')[1])
-        t3 = threading.Thread(target = verify_flow, args = '02:00:00:00:00:'+ hex(random.randrange(16,254)).split('x')[1])
-        t4 = threading.Thread(target = verify_flow, args = '05:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t5 = threading.Thread(target = verify_flow, args = '07:00:00:00:00:' +  hex(random.randrange(16,254)).split('x')[1])
-        t6 = threading.Thread(target = verify_flow, args = hex(random.randrange(16,21)).split('x')[1] + ':00:00:00:00:08')
-        t7 = threading.Thread(target = verify_flow, args = hex(random.randrange(22,50)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t8 = threading.Thread(target = verify_flow, args = hex(random.randrange(51,75)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t9 = threading.Thread(target = verify_flow, args = hex(random.randrange(76,95)).split('x')[1] +':00:00:00:00:' +
-                                                        hex(random.randrange(16,254)).split('x')[1])
-
-        t10 = threading.Thread(target = verify_flow, args = '60:0f:00:00:00:91')
-
-        t1.start()
-        t2.start()
-        t3.start()
-        t4.start()
-        t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-        t1.join()
-        t2.join()
-        t3.join()
-        t4.join()
-        t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_rate_100_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	flows_added = 0
-	stats_dir = collections.OrderedDict()
-	running_time = 0
-
-
-	for i in range(1,4):
-	    start_time = time.time()
-	    for j in range(0,100):
-		ingress_mac = self.next_mac(ingress_mac)
-		egress_mac = self.to_egress_mac(ingress_mac)
-
-		flow = OnosFlowCtrl(deviceId = self.device_id,
-			    egressPort = egress + self.port_offset,
-			    ingressPort = ingress + self.port_offset,
-			    ethSrc = ingress_mac,
-			    ethDst = egress_mac)
-		result = flow.addFlow()
-		assert_equal(result, True)
-		flows_added += 1
-	##wait for flows to be added to ONOS
-		time.sleep(1)
-		log_test.info("%d flow added.",j+1)
-	    end_time = time.time()
-	    stats_dir['run '+str(i)] =  round((end_time - start_time),2)
-	for t in stats_dir.items():
-		log_test.info("----------------------------------------------")
-		log_test.info("Statics for %s",t[0])
-		log_test.info("----------------------------------------------")
-		log_test.info("No. of flows added               Running Time ")
-		log_test.info("       %d                             %s     " %(100, t[1]))
-		running_time += float(t[1])
-
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Final Statics")
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Total No. of flows added               Total Running Time               Average no. of flows per second ")
-	log_test.info("       %d                                %s second                               %d                     "
-		%(flows_added, running_time, round(flows_added/running_time,0)))
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-
-
-
-    def test_rate_500_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	flows_added = 0
-	stats_dir = collections.OrderedDict()
-	running_time = 0
-
-
-	for i in range(1,4):
-	    start_time = time.time()
-	    for j in range(0,500):
-		ingress_mac = self.next_mac(ingress_mac)
-		egress_mac = self.to_egress_mac(ingress_mac)
-
-		flow = OnosFlowCtrl(deviceId = self.device_id,
-			    egressPort = egress + self.port_offset,
-			    ingressPort = ingress + self.port_offset,
-			    ethSrc = ingress_mac,
-			    ethDst = egress_mac)
-		result = flow.addFlow()
-		assert_equal(result, True)
-		flows_added += 1
-	##wait for flows to be added to ONOS
-		time.sleep(1)
-		log_test.info("%d flow added.",j+1)
-	    end_time = time.time()
-	    stats_dir['run '+str(i)] =  round((end_time - start_time),2)
-	for t in stats_dir.items():
-	    log_test.info("----------------------------------------------")
-	    log_test.info("Statics for %s",t[0])
-	    log_test.info("----------------------------------------------")
-	    log_test.info("No. of flows added               Running Time ")
-	    log_test.info("       %d                             %s     " %(500, t[1]))
-	    running_time += float(t[1])
-
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Final Statics")
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Total No. of flows added               Total Running Time               Average no. of flows per second ")
-	log_test.info("       %d                                %s second                               %d                     "
-		%(flows_added, running_time, round(flows_added/running_time,0)))
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-
-    def test_rate_1k_flow_mac(self):
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:01:00'
-        ingress_mac = '00:00:00:00:00:00'
-	flows_added = 0
-	stats_dir = collections.OrderedDict()
-	running_time = 0
-
-
-	for i in range(1,4):
-	    start_time = time.time()
-	    for j in range(0,1000):
-		ingress_mac = self.next_mac(ingress_mac)
-		egress_mac = self.to_egress_mac(ingress_mac)
-
-		flow = OnosFlowCtrl(deviceId = self.device_id,
-			    egressPort = egress + self.port_offset,
-			    ingressPort = ingress + self.port_offset,
-			    ethSrc = ingress_mac,
-			    ethDst = egress_mac)
-		result = flow.addFlow()
-		assert_equal(result, True)
-		flows_added += 1
-	    ##wait for flows to be added to ONOS
-		time.sleep(1)
-		log_test.info("%d flow added.",j+1)
-	    end_time = time.time()
-	    stats_dir['run '+str(i)] =  round((end_time - start_time),2)
-	for t in stats_dir.items():
-	    log_test.info("----------------------------------------------")
-	    log_test.info("Statics for %s",t[0])
-	    log_test.info("----------------------------------------------")
-	    log_test.info("No. of flows added               Running Time ")
-	    log_test.info("       %d                             %s     " %(1000, t[1]))
-	    running_time += float(t[1])
-
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Final Statics")
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-	log_test.info("Total No. of flows added               Total Running Time               Average no. of flows per second ")
-	log_test.info("       %d                                %s second                               %d                     "
-		%(flows_added, running_time, round(flows_added/running_time,0)))
-	log_test.info("-------------------------------------------------------------------------------------------------------")
-
-
-    def test_500_flow_ip(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	success_dir = {}
-
-	for i in range(0,500):
-	    ingress_map['ip'] = self.next_ip(ingress_map['ip'])
-	    assert_not_equal(ingress_map['ip'], None)
-	    egress_map['ip'] = self.to_egress_ip(ingress_map['ip'])
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ethType = '0x0800',
-				ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-				ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-				)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    random_dst = self.to_egress_ip(random_src)
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-		    success_dir[current_thread().name] = True
-
-		sniff(count=2, timeout=5,  lfilter = lambda p: IP in p and p[IP].dst == random_dst and p[IP].src == random_src
-			,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = random_src, dst = random_dst)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '192.0.0.1')
-	t2 = threading.Thread(target = verify_flow, args = '192.0.0.' + str(random.randrange(10,100,1)))
-	t3 = threading.Thread(target = verify_flow, args = '192.0.0.' +  str(random.randrange(101,255,1)))
-	t4 = threading.Thread(target = verify_flow, args = '192.0.1.' +  str(random.randrange(1,235,1)))
-	t5 = threading.Thread(target = verify_flow, args = '192.0.1.244')
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-
-	if len(success_dir) < 5 or len(success_dir) > 5:
-		self.success = False
-        assert_equal(self.success, True)
-
-
-    @nottest
-    def test_1k_flow_ip(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	success_dir ={}
-
-	for i in range(0,1000):
-	    ingress_map['ip'] =  self.next_ip(ingress_map['ip'])
-	    assert_not_equal(ingress_map['ip'], None)
-	    egress_map['ip'] =  self.to_egress_ip(ingress_map['ip'])
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ethType = '0x0800',
-				ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-				ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-				)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    random_dst = self.to_egress_ip(random_src)
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-		    success_dir[current_thread().name] = True
-
-		sniff(count=2, timeout=5,  lfilter = lambda p: IP in p and p[IP].dst == random_dst and p[IP].src == random_src
-			,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = random_src, dst = random_dst)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '192.0.0.1')
-	t2 = threading.Thread(target = verify_flow, args = '192.0.0.' + str(random.randrange(10,255,1)))
-	t3 = threading.Thread(target = verify_flow, args = '192.0.1.' +  str(random.randrange(1,100,1)))
-	t4 = threading.Thread(target = verify_flow, args = '192.0.1.' +  str(random.randrange(101,255,1)))
-	t5 = threading.Thread(target = verify_flow, args = '192.0.2.' +  str(random.randrange(1,100,1)))
-	t6 = threading.Thread(target = verify_flow, args = '192.0.2.' +  str(random.randrange(101,255,1)))
-	t7 = threading.Thread(target = verify_flow, args = '192.0.3.' +  str(random.randrange(1,100,1)))
-	t8 = threading.Thread(target = verify_flow, args = '192.0.3.' +  str(random.randrange(101,200,1)))
-	t9 = threading.Thread(target = verify_flow, args = '192.0.'+  str(random.randrange(0,3,1)) + '.' +
-				str(random.randrange(1,255,1)))
-        t10 = threading.Thread(target = verify_flow, args = '192.0.3.232')
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-		self.success = False
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_10k_flow_ip(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	success_dir = {}
-
-	for i in range(0,10000):
-	    ingress_map['ip'] =  self.next_ip(ingress_map['ip'])
-	    assert_not_equal(ingress_map['ip'], None)
-	    egress_map['ip'] =  self.to_egress_ip(ingress_map['ip'])
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ethType = '0x0800',
-				ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-				ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-				)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    random_dst = self.to_egress_ip(random_src)
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,  lfilter = lambda p: IP in p and p[IP].dst == random_dst and p[IP].src == random_src
-		      ,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = random_src, dst = random_dst)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '192.0.0.1')
-	t2 = threading.Thread(target = verify_flow, args = '192.0.0.' + str(random.randrange(1,255,1)))
-	t3 = threading.Thread(target = verify_flow, args = '192.0.5.' +  str(random.randrange(1,255,1)))
-	t4 = threading.Thread(target = verify_flow, args = '192.0.10.' +  str(random.randrange(1,255,1)))
-	t5 = threading.Thread(target = verify_flow, args = '192.0.15.' +  str(random.randrange(1,255,1)))
-	t6 = threading.Thread(target = verify_flow, args = '192.0.20.' +  str(random.randrange(1,255,1)))
-	t7 = threading.Thread(target = verify_flow, args = '192.0.25.' +  str(random.randrange(1,255,1)))
-	t8 = threading.Thread(target = verify_flow, args = '192.0.30.' +  str(random.randrange(1,255,1)))
-	t9 = threading.Thread(target = verify_flow, args = '192.0.'+  str(random.randrange(0,39,1)) + '.' +
-				str(random.randrange(1,255,1)))
-        t10 = threading.Thread(target = verify_flow, args = '192.0.39.16')
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-		self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_100k_flow_ip(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	success_dir = {}
-
-	for i in range(0,100000):
-	    ingress_map['ip'] =  self.next_ip(ingress_map['ip'])
-	    assert_not_equal(ingress_map['ip'], None)
-	    egress_map['ip'] =  self.to_egress_ip(ingress_map['ip'])
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ethType = '0x0800',
-				ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-				ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-				)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    random_dst = self.to_egress_ip(random_src)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,  lfilter = lambda p: IP in p and p[IP].dst == random_dst and p[IP].src == random_src
-			,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = random_src, dst = random_dst)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '192.0.0.1')
-	t2 = threading.Thread(target = verify_flow, args = '192.0.0.' + str(random.randrange(1,255,1)))
-	t3 = threading.Thread(target = verify_flow, args = '192.0.50.' +  str(random.randrange(1,255,1)))
-	t4 = threading.Thread(target = verify_flow, args = '192.0.100.' +  str(random.randrange(1,255,1)))
-	t5 = threading.Thread(target = verify_flow, args = '192.0.150.' +  str(random.randrange(1,255,1)))
-	t6 = threading.Thread(target = verify_flow, args = '192.0.200.' +  str(random.randrange(1,255,1)))
-	t7 = threading.Thread(target = verify_flow, args = '192.0.250.' +  str(random.randrange(1,255,1)))
-	t8 = threading.Thread(target = verify_flow, args = '192.1.'+str(random.randrange(1,75,1)) + '.'
-							+ str(random.randrange(1,255,1)))
-	t9 = threading.Thread(target = verify_flow, args = '192.1.'+str(random.randrange(76,134,1)) + '.'
-							+ str(random.randrange(1,255,1)))
-        t10 = threading.Thread(target = verify_flow, args = '192.1.134.160')
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-		self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_1000k_flow_ip(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	success_dir = {}
-
-	for i in range(0,1000000):
-	    ingress_map['ip'] =  self.next_ip(ingress_map['ip'])
-	    assert_not_equal(ingress_map['ip'], None)
-	    egress_map['ip'] =  self.to_egress_ip(ingress_map['ip'])
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				ethType = '0x0800',
-				ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-				ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-				)
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_src = ''.join(r)
-	    random_dst = self.to_egress_ip(random_src)
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-		    success_dir[current_thread().name] = True
-
-		sniff(count=2, timeout=5,  lfilter = lambda p: IP in p and p[IP].dst == random_dst and p[IP].src == random_src
-			,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = random_src, dst = random_dst)
-	    pkt = L2/L3
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = '192.0.0.1')
-	t2 = threading.Thread(target = verify_flow, args = '192.0.50.' + str(random.randrange(1,255,1)))
-	t3 = threading.Thread(target = verify_flow, args = '192.0.100.' +  str(random.randrange(1,255,1)))
-	t4 = threading.Thread(target = verify_flow, args = '192.0.150.' +  str(random.randrange(1,255,1)))
-	t5 = threading.Thread(target = verify_flow, args = '192.0.200.' +  str(random.randrange(1,255,1)))
-	t6 = threading.Thread(target = verify_flow, args = '192.0.250.' +  str(random.randrange(1,255,1)))
-	t7 = threading.Thread(target = verify_flow, args = '192.0.250.' +  str(random.randrange(1,255,1)))
-	t8 = threading.Thread(target = verify_flow, args = '192.1.'+str(random.randrange(1,150,1)) + '.'
-							+ str(random.randrange(1,255,1)))
-	t9 = threading.Thread(target = verify_flow, args = '192.1.'+str(random.randrange(152,255,1)) + '.'
-							+ str(random.randrange(1,255,1)))
-        t10 = threading.Thread(target = verify_flow, args = '192.15.66.64')
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-		self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_500_flow_tcp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': 3100 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': 1100 }
-	success_dir = {}
-
-	for i in range(0,500):
-	    ingress_map['tcp_port'] += 1
-	    egress_map['tcp_port'] += 1
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				tcpSrc = ingress_map['tcp_port'],
-				tcpDst = egress_map['tcp_port']
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d Flow added",i+1)
-        self.success = True
-
-	def verify_flow(*r):
-	    random_sport = int(''.join(r))
-	    random_dport = random_sport + 2000
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,
-		      lfilter = lambda p: TCP in p and p[TCP].dport == random_dport and p[TCP].sport == random_sport                    			  ,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	    L4 = TCP(sport = random_sport, dport = random_dport)
-	    pkt = L2/L3/L4
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-	t1 = threading.Thread(target = verify_flow, args = str(1101))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(1110,1250,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(1251,1400,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(1401,1590,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(1600))
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-
-	if len(success_dir) != 5:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_1k_flow_tcp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': 3100 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': 1100 }
-	success_dir = {}
-
-	for i in range(0,1000):
-	    ingress_map['tcp_port'] += 1
-	    egress_map['tcp_port'] += 1
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				tcpSrc = ingress_map['tcp_port'],
-				tcpDst = egress_map['tcp_port']
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-
-        self.success = True
-
-	def verify_flow(*r):
-	    random_sport = int(''.join(r))
-	    random_dport = random_sport + 2000
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,
-		      lfilter = lambda p: TCP in p and p[TCP].dport == random_dport and p[TCP].sport == random_sport                    			  ,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	    L4 = TCP(sport = random_sport, dport = random_dport)
-	    pkt = L2/L3/L4
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = str(1101))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(1110,1350,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(1351,1500,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(1501,1700,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(random.randrange(1701,1900,1)))
-	t6 = threading.Thread(target = verify_flow, args = str(random.randrange(1901,2000,1)))
-	t7 = threading.Thread(target = verify_flow, args = str(random.randrange(2000,2050,1)))
-	t8 = threading.Thread(target = verify_flow, args = str(random.randrange(2050,2080,1)))
-	t9 = threading.Thread(target = verify_flow, args = str(random.randrange(1102,2100,1)))
-	t10 = threading.Thread(target = verify_flow, args = str(2100))
-
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_10k_flow_tcp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': 31000 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': 11000 }
-	success_dir = {}
-
-	for i in range(0,10000):
-	    ingress_map['tcp_port'] += 1
-	    egress_map['tcp_port'] += 1
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				tcpSrc = ingress_map['tcp_port'],
-				tcpDst = egress_map['tcp_port']
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-
-        self.success = True
-
-	def verify_flow(*r):
-	    random_sport = int(''.join(r))
-	    random_dport = random_sport + 20000
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,
-		      lfilter = lambda p: TCP in p and p[TCP].dport == random_dport
-			    and p[TCP].sport == random_sport,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	    L4 = TCP(sport = random_sport, dport = random_dport)
-	    pkt = L2/L3/L4
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = str(11001))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(11110,12501,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(12510,14001,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(14010,15900,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(random.randrange(16000,17000,1)))
-	t6 = threading.Thread(target = verify_flow, args = str(random.randrange(17001,18000,1)))
-	t7 = threading.Thread(target = verify_flow, args = str(random.randrange(18000,19000,1)))
-	t8 = threading.Thread(target = verify_flow, args = str(random.randrange(19000,20980,1)))
-	t9 = threading.Thread(target = verify_flow, args = str(random.randrange(11002,21000,1)))
-	t10 = threading.Thread(target = verify_flow, args = str(21000))
-
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_500_flow_udp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': 3100 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': 1100 }
-	success_dir = {}
-
-	for i in range(0,500):
-	    ingress_map['udp_port'] += 1
-	    egress_map['udp_port'] += 1
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				udpSrc = ingress_map['udp_port'],
-				udpDst = egress_map['udp_port']
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-
-        self.success = True
-
-	def verify_flow(*r):
-	    random_sport = int(''.join(r))
-	    random_dport = random_sport + 2000
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,
-		      lfilter = lambda p: UDP in p and p[UDP].dport == random_dport and p[UDP].sport == random_sport                    			  ,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	    L4 = UDP(sport = random_sport, dport = random_dport)
-	    pkt = L2/L3/L4
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = str(1101))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(1110,1250,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(1251,1400,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(1401,1590,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(1600))
-
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-
-	if len(success_dir) != 5:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    def test_1k_flow_udp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': 3100 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': 1100 }
-	success_dir = {}
-
-	for i in range(0,100000):
-	    ingress_map['udp_port'] += 1
-	    egress_map['udp_port'] += 1
-
-	    flow = OnosFlowCtrl(deviceId = self.device_id,
-				egressPort = egress + self.port_offset,
-				ingressPort = ingress + self.port_offset,
-				udpSrc = ingress_map['udp_port'],
-				udpDst = egress_map['udp_port']
-				)
-
-	    result = flow.addFlow()
-	    assert_equal(result, True)
-	    ##wait for flows to be added to ONOS
-	    time.sleep(1)
-	    log_test.info("%d flow added.",i+1)
-
-        self.success = True
-
-	def verify_flow(*r):
-	    random_sport = int(''.join(r))
-	    random_dport = random_sport + 2000
-
-	    def mac_recv_task():
-		def recv_cb(pkt):
-		    log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-		    success_dir[current_thread().name] = True
-		sniff(count=2, timeout=5,
-		      lfilter = lambda p: UDP in p and p[UDP].dport == random_dport and p[UDP].sport == random_sport                    			  ,prn = recv_cb, iface = self.port_map[egress])
-
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	    L4 = UDP(sport = random_sport, dport = random_dport)
-	    pkt = L2/L3/L4
-	    log_test.info('Sending packets to verify if flows are correct')
-	    sendp(pkt, count=50, iface = self.port_map[ingress])
-	    t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = str(1101))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(1110,1350,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(1351,1500,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(1501,1700,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(random.randrange(1701,1900,1)))
-	t6 = threading.Thread(target = verify_flow, args = str(random.randrange(1901,2000,1)))
-	t7 = threading.Thread(target = verify_flow, args = str(random.randrange(2000,2050,1)))
-	t8 = threading.Thread(target = verify_flow, args = str(random.randrange(2050,2080,1)))
-	t9 = threading.Thread(target = verify_flow, args = str(random.randrange(1102,2100,1)))
-	t10 = threading.Thread(target = verify_flow, args = str(2100))
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-                self.success = False
-
-        assert_equal(self.success, True)
-
-    @nottest
-    def test_10k_flow_udp_port(self):
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': 31000 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': 11000 }
-	success_dir = {}
-
-	for i in range(0,10000):
-		ingress_map['udp_port'] += 1
-		egress_map['udp_port'] += 1
-
-	        flow = OnosFlowCtrl(deviceId = self.device_id,
-	                            egressPort = egress + self.port_offset,
-	                            ingressPort = ingress + self.port_offset,
-	                            udpSrc = ingress_map['udp_port'],
-	                            udpDst = egress_map['udp_port']
-	                            )
-
-	        result = flow.addFlow()
-	        assert_equal(result, True)
-	        ##wait for flows to be added to ONOS
-	        time.sleep(1)
-		log_test.info("%d flow added.",i+1)
-
-        self.success = True
-
-	def verify_flow(*r):
-		random_sport = int(''.join(r))
-		random_dport = random_sport + 20000
-
-	        def mac_recv_task():
-
-	            def recv_cb(pkt):
-	                log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-			success_dir[current_thread().name] = True
-	            sniff(count=2, timeout=5,
-	                  lfilter = lambda p: UDP in p and p[UDP].dport == random_dport and p[UDP].sport == random_sport                    			  ,prn = recv_cb, iface = self.port_map[egress])
-
-	        t = threading.Thread(target = mac_recv_task)
-	        t.start()
-	        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-	        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-	        L4 = UDP(sport = random_sport, dport = random_dport)
-	        pkt = L2/L3/L4
-	        log_test.info('Sending packets to verify if flows are correct')
-	        sendp(pkt, count=50, iface = self.port_map[ingress])
-	        t.join()
-
-	t1 = threading.Thread(target = verify_flow, args = str(11001))
-	t2 = threading.Thread(target = verify_flow, args = str(random.randrange(11110,12501,1)))
-	t3 = threading.Thread(target = verify_flow, args = str(random.randrange(12510,14001,1)))
-	t4 = threading.Thread(target = verify_flow, args = str(random.randrange(14010,15900,1)))
-	t5 = threading.Thread(target = verify_flow, args = str(random.randrange(16000,17000,1)))
-	t6 = threading.Thread(target = verify_flow, args = str(random.randrange(17001,18000,1)))
-	t7 = threading.Thread(target = verify_flow, args = str(random.randrange(18000,19000,1)))
-	t8 = threading.Thread(target = verify_flow, args = str(random.randrange(19000,20980,1)))
-	t9 = threading.Thread(target = verify_flow, args = str(random.randrange(11002,21000,1)))
-	t10 = threading.Thread(target = verify_flow, args = str(21000))
-
-
-	t1.start()
-	t2.start()
-	t3.start()
-	t4.start()
-	t5.start()
-        t6.start()
-        t7.start()
-        t8.start()
-        t9.start()
-        t10.start()
-
-	t1.join()
-	t2.join()
-	t3.join()
-	t4.join()
-	t5.join()
-        t6.join()
-        t7.join()
-        t8.join()
-        t9.join()
-        t10.join()
-
-	if len(success_dir) != 10:
-                self.success = False
-        assert_equal(self.success, True)
diff --git a/src/test/fsm/noseMd5AuthHolder.py b/src/test/fsm/noseMd5AuthHolder.py
deleted file mode 100644
index 72b9fde..0000000
--- a/src/test/fsm/noseMd5AuthHolder.py
+++ /dev/null
@@ -1,57 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-def initMd5AuthHolderFsmTable(obj,St,Ev):
-    return {
-
-    ## CurrentState                          Event                                      Actions                   NextState
-
-      (St.ST_EAP_SETUP,                      Ev.EVT_EAP_SETUP                       ):( (obj._eapSetup,),         St.ST_EAP_START),
-
-    ## CurrentState                          Event                                      Actions                   NextState
-
-      (St.ST_EAP_MD5_CHALLENGE,              Ev.EVT_EAP_MD5_CHALLENGE               ):( (obj._eapMd5Challenge,),  St.ST_EAP_STATUS),
-
-    ## CurrentState                          Event                                      Actions                   NextState
-
-      (St.ST_EAP_STATUS,                     Ev.EVT_EAP_STATUS                      ):( (obj._eapStatus,),        St.ST_EAP_MD5_DONE),
-
-    ## CurrentState                          Event                                      Actions                   NextState
-
-      (St.ST_EAP_ID_REQ,                     Ev.EVT_EAP_ID_REQ                      ):( (obj._eapIdReq,),         St.ST_EAP_MD5_CHALLENGE),
-
-    ## CurrentState                          Event                                      Actions                   NextState
-
-      (St.ST_EAP_START,                      Ev.EVT_EAP_START                       ):( (obj._eapStart,),         St.ST_EAP_ID_REQ),
-
-}
-
diff --git a/src/test/fsm/nosePAPAuthHolder.py b/src/test/fsm/nosePAPAuthHolder.py
deleted file mode 100644
index 2c872c4..0000000
--- a/src/test/fsm/nosePAPAuthHolder.py
+++ /dev/null
@@ -1,57 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-def initPAPAuthHolderFsmTable(obj,St,Ev):
-    return {
-
-    ## CurrentState                           Event                                       Actions                 NextState
-
-      (St.ST_EAP_SETUP,                       Ev.EVT_EAP_SETUP                        ):( (obj._eapSetup,),       St.ST_EAP_START),
-
-    ## CurrentState                           Event                                       Actions                 NextState
-
-      (St.ST_EAP_PAP_PASSWD_REQ,              Ev.EVT_EAP_PAP_PASSWD_REQ               ):( (obj._eapPAPPassReq,),  St.ST_EAP_PAP_DONE),
-
-    ## CurrentState                           Event                                       Actions                 NextState
-
-      (St.ST_EAP_PAP_USER_REQ,                Ev.EVT_EAP_PAP_USER_REQ                 ):( (obj._eapPAPUserReq,),  St.ST_EAP_PAP_PASSWD_REQ),
-
-    ## CurrentState                           Event                                       Actions                 NextState
-
-      (St.ST_EAP_ID_REQ,                      Ev.EVT_EAP_ID_REQ                       ):( (obj._eapIdReq,),       St.ST_EAP_PAP_USER_REQ),
-
-    ## CurrentState                           Event                                       Actions                 NextState
-
-      (St.ST_EAP_START,                       Ev.EVT_EAP_START                        ):( (obj._eapStart,),       St.ST_EAP_ID_REQ),
-
-}
-
diff --git a/src/test/fsm/noseTlsAuthHolder.py b/src/test/fsm/noseTlsAuthHolder.py
deleted file mode 100644
index 1d9bc68..0000000
--- a/src/test/fsm/noseTlsAuthHolder.py
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-def initTlsAuthHolderFsmTable(obj,St,Ev):
-    return {
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_TLS_HELLO_REQ,                       Ev.EVT_EAP_TLS_HELLO_REQ                        ):( (obj._eapTlsHelloReq,),          St.ST_EAP_TLS_CERT_REQ),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_ID_REQ,                              Ev.EVT_EAP_ID_REQ                               ):( (obj._eapIdReq,),                St.ST_EAP_TLS_HELLO_REQ),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_SETUP,                               Ev.EVT_EAP_SETUP                                ):( (obj._eapSetup,),                St.ST_EAP_START),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_TLS_FINISHED,                        Ev.EVT_EAP_TLS_FINISHED                         ):( (obj._eapTlsFinished,),          St.ST_EAP_TLS_DONE),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_START,                               Ev.EVT_EAP_START                                ):( (obj._eapStart,),                St.ST_EAP_ID_REQ),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_TLS_CHANGE_CIPHER_SPEC,              Ev.EVT_EAP_TLS_CHANGE_CIPHER_SPEC               ):( (obj._eapTlsChangeCipherSpec,),  St.ST_EAP_TLS_FINISHED),
-
-    ## CurrentState                                   Event                                               Actions                          NextState
-
-      (St.ST_EAP_TLS_CERT_REQ,                        Ev.EVT_EAP_TLS_CERT_REQ                         ):( (obj._eapTlsCertReq,),           St.ST_EAP_TLS_CHANGE_CIPHER_SPEC),
-
-}
-
diff --git a/src/test/igmp/__init__.py b/src/test/igmp/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/igmp/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/igmp/igmpTest.json b/src/test/igmp/igmpTest.json
deleted file mode 100644
index 9a8c163..0000000
--- a/src/test/igmp/igmpTest.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-    "ROVER_TIMEOUT": 200, 
-    "MGROUP2": "239.2.2.3", 
-    "MCAST_TRAFFIC_TIMEOUT": 10, 
-    "ROVER_TEST_TIMEOUT": 300, 
-    "V_INF1": "veth0", 
-    "V_INF2": "veth1", 
-    "MINVALIDGROUP2": "239.255.255.255", 
-    "MINVALIDGROUP1": "255.255.255.255", 
-    "NEGATIVE_TRAFFIC_STATUS": 1, 
-    "MGROUP1": "239.1.2.3", 
-    "IP_DST": "224.0.0.22", 
-    "PORT_RX_DEFAULT": 1, 
-    "IGMP_DST_MAC": "01:00:5e:00:00:16", 
-    "IGMP_QUERY_TIMEOUT": 60, 
-    "IGMP_SRC_MAC": "5a:e1:ac:ec:4d:a1", 
-    "IGMP_TEST_TIMEOUT": 5, 
-    "ROVER_JOIN_TIMEOUT": 60, 
-    "PORT_TX_DEFAULT": 2, 
-    "IP_SRC": "1.2.3.4"
-}
\ No newline at end of file
diff --git a/src/test/igmp/igmpTest.py b/src/test/igmp/igmpTest.py
deleted file mode 100644
index 166139b..0000000
--- a/src/test/igmp/igmpTest.py
+++ /dev/null
@@ -1,2219 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from twisted.internet import defer
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from scapy.all import *
-from select import select as socket_select
-import time, monotonic
-import os
-import random
-import threading
-from IGMP import *
-from McastTraffic import *
-from Stats import Stats
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from Channels import IgmpChannel
-from CordLogger import CordLogger
-from CordTestConfig import setup_module, teardown_module
-from CordTestUtils import log_test
-log_test.setLevel('INFO')
-
-class IGMPTestState:
-
-      def __init__(self, groups = [], df = None, state = 0):
-            self.df = df
-            self.state = state
-            self.counter = 0
-            self.groups = groups
-            self.group_map = {} ##create a send/recv count map
-            for g in groups:
-                self.group_map[g] = (Stats(), Stats())
-
-      def update(self, group, tx = 0, rx = 0, t = 0):
-            self.counter += 1
-            index = 0 if rx == 0 else 1
-            v = tx if rx == 0 else rx
-            if self.group_map.has_key(group):
-                  self.group_map[group][index].update(packets = v, t = t)
-
-      def update_state(self):
-          self.state = self.state ^ 1
-
-class igmp_exchange(CordLogger):
-
-    V_INF1 = 'veth0'
-    MGROUP1 = '239.1.2.3'
-    MGROUP2 = '239.2.2.3'
-    MINVALIDGROUP1 = '255.255.255.255'
-    MINVALIDGROUP2 = '239.255.255.255'
-    MMACGROUP1 = "01:00:5e:01:02:03"
-    MMACGROUP2 = "01:00:5e:02:02:03"
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.0.22'
-    NEGATIVE_TRAFFIC_STATUS = 1
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-    IGMP_TEST_TIMEOUT = 5
-    IGMP_QUERY_TIMEOUT = 60
-    MCAST_TRAFFIC_TIMEOUT = 20
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    max_packets = 100
-    app = 'org.opencord.igmp'
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
-    ROVER_TEST_TIMEOUT = 300 #3600*86
-    ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
-    ROVER_JOIN_TIMEOUT = 60
-    VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.VOLTHA_ENABLED is False:
-            OnosCtrl.config_device_driver()
-            OnosCtrl.cord_olt_config(cls.olt)
-        time.sleep(2)
-
-    @classmethod
-    def tearDownClass(cls):
-        if cls.VOLTHA_ENABLED is False:
-            OnosCtrl.config_device_driver(driver = 'ovs')
-
-    def setUp(self):
-        ''' Activate the igmp app'''
-        super(igmp_exchange, self).setUp()
-        self.onos_ctrl = OnosCtrl(self.app)
-	self.onos_ctrl.activate()
-        self.igmp_channel = IgmpChannel()
-
-    def tearDown(self):
-        super(igmp_exchange, self).tearDown()
-
-    def onos_load_config(self, config):
-	log_test.info('onos load config is %s'%config)
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-    def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
-          return
-          ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
-	  if flag: #to maintain seperate group-source pair.
-	      for i in range(len(groups)):
-		  d = {}
-		  d['source'] = src_list[i] or '0.0.0.0'
-		  d['group'] = groups[i]
-		  ssm_xlate_list.append(d)
-	  else:
-              for g in groups:
-                  for s in src_list:
-                      d = {}
-                      d['source'] = s or '0.0.0.0'
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-          self.onos_load_config(ssm_dict)
-          cord_port_map = {}
-          for g in groups:
-                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
-          self.igmp_channel.cord_port_table_load(cord_port_map)
-          time.sleep(2)
-
-    def mcast_ip_range(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def random_mcast_ip(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def source_ip_range(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def get_igmp_intf(self):
-        inst = os.getenv('TEST_INSTANCE', None)
-        if not inst:
-            return 'veth0'
-        inst = int(inst) + 1
-        if inst >= self.port_map['uplink']:
-            inst += 1
-        if self.port_map.has_key(inst):
-              return self.port_map[inst]
-        return 'veth0'
-
-    def igmp_verify_join(self, igmpStateList):
-        sendState, recvState = igmpStateList
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            tx = tx_stats.count
-            assert_greater(tx, 0)
-            rx_stats = recvState.group_map[g][1]
-            rx = rx_stats.count
-            assert_greater(rx, 0)
-            log_test.info('Receive stats %s for group %s' %(rx_stats, g))
-
-        log_test.info('IGMP test verification success')
-
-    def igmp_verify_leave(self, igmpStateList, leave_groups):
-        sendState, recvState = igmpStateList[0], igmpStateList[1]
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            rx_stats = recvState.group_map[g][1]
-            tx = tx_stats.count
-            rx = rx_stats.count
-            assert_greater(tx, 0)
-            if g not in leave_groups:
-                log_test.info('Received %d packets for group %s' %(rx, g))
-        for g in leave_groups:
-            rx = recvState.group_map[g][1].count
-            assert_equal(rx, 0)
-
-        log_test.info('IGMP test verification success')
-
-    def mcast_traffic_timer(self):
-          log_test.info('MCAST traffic timer expiry')
-          self.mcastTraffic.stopReceives()
-
-    def send_mcast_cb(self, send_state):
-        for g in send_state.groups:
-            send_state.update(g, tx = 1)
-        return 0
-
-    ##Runs in the context of twisted reactor thread
-    def igmp_recv(self, igmpState):
-        s = socket_select([self.recv_socket], [], [], 1.0)
-        if self.recv_socket in s[0]:
-              p = self.recv_socket.recv()
-              try:
-                    send_time = float(p.payload.load)
-                    recv_time = monotonic.monotonic()
-              except:
-                    log_test.info('Unexpected Payload received: %s' %p.payload.load)
-                    return 0
-              #log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
-              igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
-        return 0
-
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface=iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-        self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
-              gr.sources = src_list
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        if rec_queryCount == None:
-            log_test.info('Sending IGMP join for group %s and waiting for one query packet and printing the packet' %groups)
-            resp = srp1(pkt, iface=iface)
-        else:
-            log_test.info('Sending IGMP join for group %s and waiting for periodic query packets and printing one packet' %groups)
-            resp = srp1(pkt, iface=iface)
-#       resp = srp1(pkt, iface=iface) if rec_queryCount else srp3(pkt, iface=iface)
-        resp[0].summary()
-        log_test.info('Sent IGMP join for group %s and received a query packet and  printing packet' %groups)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-	log_test.info('entering into igmp leave function')
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface = iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_leave_listening_group_specific_query(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        log_test.info('Sending IGMP leave for group %s and waiting for one group specific query packet and printing the packet' %groups)
-        resp = srp1(pkt, iface=iface)
-        resp[0].summary()
-        log_test.info('Sent IGMP leave for group %s and received a group specific query packet and printing packet' %groups)
-        if delay != 0:
-            time.sleep(delay)
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_join_verify_traffic(self):
-        groups = [self.MGROUP1, self.MGROUP1]
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        mcastTraffic = McastTraffic(groups, iface= tx_intf, cb = self.send_mcast_cb, arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = rx_intf, type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                #log_test.info('Sending IGMP leave for groups: %s' %groups)
-                self.send_igmp_leave(groups, iface = rx_intf, delay = 2)
-                self.recv_socket.close()
-                self.igmp_verify_join(stateList)
-                self.df.callback(0)
-
-        self.send_igmp_join(groups, iface = rx_intf)
-        mcastTraffic.start()
-        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
-        reactor.callLater(0, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
-    def test_igmp_leave_verify_traffic(self):
-        groups = [self.MGROUP1]
-        leave_groups = [self.MGROUP1]
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        IGMPTestState(groups = groups, df = df)
-        tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        mcastTraffic = McastTraffic(groups, iface= tx_intf, cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = rx_intf, type = ETH_P_IP)
-
-	mcastTraffic.start()
-	self.send_igmp_join(groups, iface = rx_intf)
-        time.sleep(5)
-	self.send_igmp_leave(leave_groups, delay = 3, iface = rx_intf)
-        time.sleep(10)
-	join_state = IGMPTestState(groups = leave_groups)
-	status = self.igmp_not_recv_task(rx_intf, leave_groups, join_state)
-	log_test.info('verified status for igmp recv task %s'%status)
-	assert status == 1 , 'EXPECTED RESULT'
-	self.df.callback(0)
-        return df
-
-    @deferred(timeout=100)
-    def test_igmp_leave_join_loop(self):
-        self.groups = ['226.0.1.1', '227.0.0.1', '228.0.0.1', '229.0.0.1', '230.0.0.1' ]
-        self.src_list = ['3.4.5.6', '7.8.9.10']
-	self.onos_ssm_table_load(self.groups,src_list=self.src_list)
-        df = defer.Deferred()
-        self.df = df
-        self.iterations = 0
-        self.num_groups = len(self.groups)
-        self.MAX_TEST_ITERATIONS = 10
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-
-        def igmp_srp_task(v):
-              if self.iterations < self.MAX_TEST_ITERATIONS:
-                    if v == 1:
-                          ##join test
-                          self.num_groups = random.randint(0, len(self.groups))
-                          self.send_igmp_join(self.groups[:self.num_groups],
-                                              src_list = self.src_list,
-                                              iface = rx_intf, delay = 0)
-                    else:
-                          self.send_igmp_leave(self.groups[:self.num_groups],
-                                               src_list = self.src_list,
-                                               iface = rx_intf, delay = 0)
-                    self.iterations += 1
-                    v ^= 1
-                    reactor.callLater(1.0 + 0.5*self.num_groups,
-                                      igmp_srp_task, v)
-              else:
-                    self.df.callback(0)
-
-        reactor.callLater(0, igmp_srp_task, 1)
-        return df
-
-    def igmp_join_task(self, intf, groups, state, src_list = ['1.2.3.4']):
-          #self.onos_ssm_table_load(groups, src_list)
-          igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                        gaddr=self.IP_DST)
-          for g in groups:
-                gr = IGMPv3gr(rtype = IGMP_V3_GR_TYPE_INCLUDE, mcaddr = g)
-                gr.sources = src_list
-                igmp.grps.append(gr)
-
-          for g in groups:
-                state.group_map[g][0].update(1, t = monotonic.monotonic())
-
-          pkt = self.igmp_eth/self.igmp_ip/igmp
-          IGMPv3.fixup(pkt)
-          sendp(pkt, iface=intf)
-          log_test.debug('Returning from join task')
-
-    def igmp_recv_task(self, intf, groups, join_state):
-          recv_socket = L3PacketSocket(iface = intf, type = ETH_P_IP)
-          group_map = {}
-          for g in groups:
-                group_map[g] = [0,0]
-
-          log_test.info('Verifying join interface should receive multicast data')
-          while True:
-                p = recv_socket.recv()
-                if p.dst in groups and group_map[p.dst][0] == 0:
-                      group_map[p.dst][0] += 1
-                      group_map[p.dst][1] = monotonic.monotonic()
-                      c = 0
-                      for g in groups:
-                            c += group_map[g][0]
-                      if c == len(groups):
-                            break
-          for g in groups:
-                join_start = join_state.group_map[g][0].start
-                recv_time = group_map[g][1] * 1000000
-                delta = (recv_time - join_start)
-                log_test.info('Join for group %s received in %.3f usecs' %
-                         (g, delta))
-
-          recv_socket.close()
-          log_test.debug('Returning from recv task')
-
-    def igmp_not_recv_task(self, intf, groups, join_state):
-	  log_test.info('Entering igmp not recv task loop')
-          recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
-          group_map = {}
-          for g in groups:
-                group_map[g] = [0,0]
-
-          log_test.info('Verifying join interface, should not receive any multicast data')
-          self.NEGATIVE_TRAFFIC_STATUS = 1
-          def igmp_recv_cb(pkt):
-                log_test.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
-                self.NEGATIVE_TRAFFIC_STATUS = 2
-          sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: IP in p and p[IP].dst in groups,
-                timeout = 3, opened_socket = recv_socket)
-          recv_socket.close()
-          return self.NEGATIVE_TRAFFIC_STATUS
-
-    def group_latency_check(self, groups):
-          tasks = []
-          self.send_igmp_leave(groups = groups)
-          join_state = IGMPTestState(groups = groups)
-          tasks.append(threading.Thread(target=self.igmp_join_task, args = ('veth0', groups, join_state,)))
-          traffic_state = IGMPTestState(groups = groups)
-          mcast_traffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                       arg = traffic_state)
-          mcast_traffic.start()
-          tasks.append(threading.Thread(target=self.igmp_recv_task, args = ('veth0', groups, join_state)))
-          for t in tasks:
-                t.start()
-          for t in tasks:
-                t.join()
-
-          mcast_traffic.stop()
-          self.send_igmp_leave(groups = groups)
-          return
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmp_1group_join_latency(self):
-        groups = ['239.0.1.1']
-        df = defer.Deferred()
-        def igmp_1group_join_latency():
-              self.group_latency_check(groups)
-              df.callback(0)
-        reactor.callLater(0, igmp_1group_join_latency)
-        return df
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmp_2group_join_latency(self):
-        groups = [self.MGROUP1, self.MGROUP1]
-        df = defer.Deferred()
-        def igmp_2group_join_latency():
-            self.group_latency_check(groups)
-            df.callback(0)
-        reactor.callLater(0, igmp_2group_join_latency)
-        return df
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmp_Ngroup_join_latency(self):
-        groups = ['239.0.1.1', '240.0.1.1', '241.0.1.1', '242.0.1.1']
-        df = defer.Deferred()
-        def igmp_Ngroup_join_latency():
-            self.group_latency_check(groups)
-            df.callback(0)
-        reactor.callLater(0, igmp_Ngroup_join_latency)
-        return df
-
-    def test_igmp_join_rover_all(self):
-          s = (224 << 24) | 1
-          #e = (225 << 24) | (255 << 16) | (255 << 16) | 255
-          e = (224 << 24) | 10
-          for i in xrange(s, e+1):
-                if i&0xff:
-                      ip = '%d.%d.%d.%d'%((i>>24)&0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
-                self.send_igmp_join([ip], delay = 0)
-
-    @deferred(timeout=ROVER_TEST_TIMEOUT)
-    def test_igmp_join_rover(self):
-          df = defer.Deferred()
-          iface = self.get_igmp_intf()
-          self.df = df
-          self.count = 0
-          self.timeout = 0
-          self.complete = False
-          def igmp_join_timer():
-                self.timeout += self.ROVER_JOIN_TIMEOUT
-                log_test.info('IGMP joins sent: %d' %self.count)
-                if self.timeout >= self.ROVER_TIMEOUT:
-                      self.complete = True
-                reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
-
-          reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
-          self.start_channel = (224 << 24) | 1
-          self.end_channel = (224 << 24) | 200 #(225 << 24) | (255 << 16) | (255 << 16) | 255
-          self.current_channel = self.start_channel
-          def igmp_join_rover(self):
-                #e = (224 << 24) | 10
-                chan = self.current_channel
-                self.current_channel += 1
-                if self.current_channel >= self.end_channel:
-                      chan = self.current_channel = self.start_channel
-                if chan&0xff:
-                      ip = '%d.%d.%d.%d'%((chan>>24)&0xff, (chan>>16)&0xff, (chan>>8)&0xff, chan&0xff)
-                      self.send_igmp_join([ip], delay = 0, ssm_load = False, iface = iface)
-                      self.count += 1
-                if self.complete == True:
-                      log_test.info('%d IGMP joins sent in %d seconds over %s' %(self.count, self.timeout, iface))
-                      self.df.callback(0)
-                else:
-                      reactor.callLater(0, igmp_join_rover, self)
-          reactor.callLater(0, igmp_join_rover, self)
-          return df
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmp_query(self):
-        groups = ['224.0.0.1'] ##igmp query group
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        self.df = df
-        self.recv_socket = L2Socket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_query_timeout():
-              def igmp_query_cb(pkt):
-		    log_test.info('received igmp query packet is %s'%pkt.show())
-                    log_test.info('Got IGMP query packet from %s for %s' %(pkt[IP].src, pkt[IP].dst))
-                    assert_equal(pkt[IP].dst, '224.0.0.1')
-              sniff(prn = igmp_query_cb, count=1, lfilter = lambda p: IP in p and p[IP].dst in groups,
-                    opened_socket = self.recv_socket)
-              self.recv_socket.close()
-              self.df.callback(0)
-
-        #self.send_igmp_join(groups)
-        self.test_timer = reactor.callLater(self.IGMP_QUERY_TIMEOUT, igmp_query_timeout)
-        return df
-
-    def igmp_send_joins_different_groups_srclist(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None):
-        g1 = groups[0]
-        g2 = groups[1]
-        sourcelist1 = sources[0]
-        sourcelist2 = sources[1]
-        eth = Ether(dst = self.IGMP_DST_MAC,type = ETH_P_IP)
-        ip = IP(dst = self.IP_DST)
-        log_test.info('Sending join message for the group %s' %g1)
-        self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
-        eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
-        ip = IP(dst = g2)
-        log_test.info('Sending join message for group %s' %g2)
-        self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
-        log_test.info('Done with igmp_send_joins_different_groups_srclist')
-
-    def igmp_send_joins_different_groups_srclist_wait_query_packets(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None, query_group1 = None, query_group2 = None):
-        g1 = groups[0]
-        g2 = groups[1]
-        sourcelist1 = sources[0]
-        sourcelist2 = sources[1]
-        eth = Ether(dst = self.MMACGROUP1, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
-        src_ip = ip_src or self.IP_SRC
-        ip = IP(dst = g1, src = src_ip)
-        if query_group1 is 'group1':
-            log_test.info('Sending join message for the group %s and waiting for a query packet on join interface' %g1)
-            self.send_igmp_join_recvQuery((g1,), None, src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
-        else:
-            log_test.info('Sending join message for the group %s' %g1)
-            self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
-        eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
-        ip = IP(dst = g2, src = src_ip)
-        if query_group2 is 'group2':
-            log_test.info('Sending join message for the group %s and waiting for a query packet on join interface' %g2)
-            self.send_igmp_join_recvQuery((g2,), None, src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
-        else:
-            log_test.info('Sending join message for group %s' %g2)
-            self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
-
-    def igmp_joins_leave(self,groups,src_list,again_join = False, df = None):
-        groups1 = [groups[0]]
-        groups2 = [groups[1]]
-	src1 = [src_list[0]]
-	src2 = [src_list[1]]
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (src1, src2), intf = self.V_INF1, delay = 2)
-
-        src_ip = src1[0]
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-
-        igmpState2 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-	dst_mac = self.iptomac(groups1[0])
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb,
-                                     arg = igmpState1)
-        src_ip = src2[0]
-	dst_mac = self.iptomac(groups1[0])
-        mcastTraffic2 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb,
-                                     arg = igmpState2)
-        mcastTraffic1.start()
-        mcastTraffic2.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        join_state2 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        log_test.info('Interface is receiving multicast groups %s' %groups1)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
-        log_test.info('Interface is receiving multicast groups %s' %groups2)
-        log_test.info('Interface is sending leave message for groups %s now' %groups2)
-        self.send_igmp_leave(groups = groups2, src_list = src2, iface = self.V_INF1, delay = 2)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        target4 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state2)
-        assert target4 == 1, 'EXPECTED FAILURE'
-        if again_join:
-            dst_mac = '01:00:5e:02:02:03'
-            ip_dst = '239.2.2.3'
-            eth = Ether(dst = dst_mac,  type = ETH_P_IP)
-            ip = IP(dst = ip_dst)
-            log_test.info('Interface sending join message again for the groups %s' %groups2)
-            self.send_igmp_join(groups2, src_list = [src_ip], ip_pkt = eth/ip, iface = self.V_INF1, delay = 2)
-            self.igmp_recv_task(self.V_INF1, groups2, join_state2)
-            log_test.info('Interface is receiving multicast groups %s again' %groups2)
-            self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-            log_test.info('Interface is still receiving from multicast groups %s' %groups1)
-        else:
-            log_test.info('Ended test case')
-        mcastTraffic1.stop()
-        mcastTraffic2.stop()
-
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_2joins_1leave(self):
-        df = defer.Deferred()
-        def igmp_2joins_1leave():
-	      groups = ['234.2.3.4','236.8.7.9']
-	      src_list = ['2.3.4.5','5.4.3.2']
-	      self.onos_ssm_table_load(groups,src_list = src_list)
-              self.igmp_joins_leave(groups,src_list,again_join = False, df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_2joins_1leave)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+25)
-    def test_igmp_2joins_1leave_and_join_again(self):
-        df = defer.Deferred()
-        def igmp_2joins_1leave_join_again():
-	      groups = ['234.2.3.4','236.8.7.9']
-	      src_list = ['2.3.4.5','5.4.3.2']
-	      self.onos_ssm_table_load(groups,src_list = src_list)
-              self.igmp_joins_leave(groups,src_list,again_join = True, df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_2joins_1leave_join_again)
-        return df
-
-    def igmp_not_in_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-	self.onos_ssm_table_load(groups1 + groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','2.2.2.2', '5.5.5.5'])
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                     (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        src_ip = '6.6.6.6'
-	dst_mac = self.iptomac(groups1[0])
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface = 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        log_test.info('Interface should not receive from multicast groups %s from an interface, which is expected' %groups1)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 2, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s, working as expected' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_not_in_src_list(self):
-        df = defer.Deferred()
-        def igmp_not_in_src_list():
-              self.igmp_not_in_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_not_in_src_list)
-        return df
-
-    def igmp_change_to_exclude_src_list(self, df = None):
-        groups1 = [self.random_mcast_ip()]
-        groups2 = [self.random_mcast_ip()]
-	self.onos_ssm_table_load(groups1 + groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','2.2.2.2', '5.5.5.5'])
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        src_ip = '2.2.2.2'
-	dst_mac=self.iptomac(groups1[0])
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
-        target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target2 == 2, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_change_to_exclude_src_list(self):
-        df = defer.Deferred()
-        def igmp_change_to_exclude_src_list():
-              self.igmp_change_to_exclude_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_change_to_exclude_src_list)
-        return df
-
-    def igmp_include_to_allow_src_list(self, df = None):
-        groups1 = [self.random_mcast_ip()] #(self.MGROUP1,)
-	self.onos_ssm_table_load(groups1,src_list = ['4.4.4.4','6.6.6.6'])
-	self.send_igmp_join(groups = groups1, src_list = ['4.4.4.4'],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-        src_ip = '4.4.4.4'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = src_ip,
-					cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-	mcastTraffic1.stop()
-	mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
-                                        cb = self.send_mcast_cb, arg = igmpState1)
-	self.send_igmp_join(groups = groups1, src_list = ['6.6.6.6'],record_type = IGMP_V3_GR_TYPE_ALLOW_NEW,
-                                         iface = self.V_INF1)
-	mcastTraffic2.start()
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        mcastTraffic2.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
-    def test_igmp_include_to_allow_src_list(self):
-        df = defer.Deferred()
-        def igmp_include_to_allow_src_list():
-              self.igmp_include_to_allow_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_include_to_allow_src_list)
-        return df
-
-    def igmp_include_to_block_src_list(self, df = None):
-        groups1 = [self.random_mcast_ip()]   #groups1 = (self.MGROUP1,)
-	self.onos_ssm_table_load(groups1,src_list = ['4.4.4.4','6.6.6.6'])
-	self.send_igmp_join(groups = groups1, src_list = ['4.4.4.4','6.6.6.6'],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
-					cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-	mcastTraffic1.stop()
-	self.send_igmp_join(groups = groups1, src_list = ['6.6.6.6'],record_type = IGMP_V3_GR_TYPE_BLOCK_OLD,
-                                         iface = self.V_INF1)
-	mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = '6.6.6.6',
-                                        cb = self.send_mcast_cb, arg = igmpState1)
-	mcastTraffic2.start()
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-	assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is still receiving traffic from old multicast group %s even after we send block for source list' %groups1)
-        mcastTraffic2.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
-    def test_igmp_include_to_block_src_list(self):
-        df = defer.Deferred()
-        def igmp_include_to_block_src_list():
-              self.igmp_include_to_block_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_include_to_block_src_list)
-        return df
-
-
-    def igmp_change_to_include_src_list(self, df = None):
-        groups1 = [self.random_mcast_ip()]
-	src_list = ['4.4.4.4','6.6.6.6']
-	self.onos_ssm_table_load(groups1,src_list = src_list)
-        self.send_igmp_leave(groups = groups1, src_list = src_list,
-                             iface = self.V_INF1, delay = 2)
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2',src_ip = src_list[0],
-                                          cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-	mcastTraffic1.stop()
-        self.send_igmp_join(groups = groups1, src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-        mcastTraffic2 = McastTraffic(groups1, iface= 'veth2',src_ip = src_list[1],
-                                        cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic2.start()
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        mcastTraffic2.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_change_to_include_src_list(self):
-        df = defer.Deferred()
-        def igmp_change_to_include_src_list():
-              self.igmp_change_to_include_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_change_to_include_src_list)
-        return df
-
-    #this test case failing because group in include receiving multicast traffic from any of the source
-    def igmp_exclude_to_allow_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-	self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','6.6.6.6', '7.7.7.7', '8.8.8.8','5.5.5.5'])
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
-                             iface = self.V_INF1, delay = 2)
-
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_exclude_to_allow_src_list(self):
-        df = defer.Deferred()
-        def igmp_exclude_to_allow_src_list():
-              self.igmp_exclude_to_allow_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_exclude_to_allow_src_list)
-        return df
-
-    def igmp_exclude_to_block_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-	self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','7.7.7.7','5.5.5.5'])
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
-                             iface = self.V_INF1, delay = 2)
-
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
-                             iface = self.V_INF1, delay = 2)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_exclude_to_block_src_list(self):
-        df = defer.Deferred()
-        def igmp_exclude_to_block_src_list():
-              self.igmp_exclude_to_block_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_exclude_to_block_src_list)
-        return df
-
-    #this test case failing because group in include mode recieves traffic from other sources also.
-    def igmp_new_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-	self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','5.5.5.5','6.6.6.6'])
-        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '6.6.6.6'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s after sending join with new source list' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
-    def test_igmp_new_src_list(self):
-        df = defer.Deferred()
-        def igmp_new_src_list():
-              self.igmp_new_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_new_src_list)
-        return df
-
-    def igmp_block_old_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-	self.onos_ssm_table_load(groups1+groups2,src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4','5.5.5.5','6.6.6.6','7.7.7.7'])
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s' %groups2)
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
-                                                      intf = self.V_INF1, delay = 2)
-        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target2 == 2, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s after sending join with block old source list' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_block_old_src_list(self):
-        df = defer.Deferred()
-        def igmp_block_old_src_list():
-              self.igmp_block_old_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_block_old_src_list)
-        return df
-
-    def igmp_include_empty_src_list(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['0']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent join with source list is empty' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_include_empty_src_list(self):
-        ## '''Disabling this test as scapy IGMP doesn't work with empty source lists'''
-        df = defer.Deferred()
-        def igmp_include_empty_src_list():
-              self.igmp_include_empty_src_list(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_include_empty_src_list)
-        return df
-
-    def igmp_exclude_empty_src_list(self, df = None):
-        groups2 = (self.MGROUP2,)
-        self.send_igmp_leave(groups = groups2, src_list = ['0'], iface = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving multicast groups %s' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_exclude_empty_src_list(self):
-        df = defer.Deferred()
-        def igmp_exclude_empty_src_list():
-              self.igmp_exclude_empty_src_list()
-              df.callback(0)
-        reactor.callLater(0, igmp_exclude_empty_src_list)
-        return df
-
-    def igmp_join_sourceip_0_0_0_0(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '0.0.0.0'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s when we sent join with source IP  is 0.0.0.0' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_join_sourceip_0_0_0_0(self):
-        df = defer.Deferred()
-        def igmp_join_sourceip_0_0_0_0():
-              self.igmp_join_sourceip_0_0_0_0(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_join_sourceip_0_0_0_0)
-        return df
-
-    def igmp_invalid_join_packet(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MINVALIDGROUP1,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_invalid_join_packet(self):
-        df = defer.Deferred()
-        def igmp_invalid_join_packet():
-              self.igmp_invalid_join_packet(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_invalid_join_packet)
-        return df
-
-    def igmp_join_data_receiving_during_subscriber_link_toggle(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups,  before bring down the self.V_INF1=%s  ' %self.V_INF1)
-        os.system('ifconfig '+self.V_INF1+' down')
-        log_test.info(' the self.V_INF1 %s is down now  ' %self.V_INF1)
-        os.system('ifconfig '+self.V_INF1)
-        time.sleep(10)
-        os.system('ifconfig '+self.V_INF1+' up')
-        os.system('ifconfig '+self.V_INF1)
-        log_test.info(' the self.V_INF1 %s is up now  ' %self.V_INF1)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s when we nterface up after down  ' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_join_data_received_during_subscriber_link_toggle(self):
-        df = defer.Deferred()
-        def igmp_join_data_received_during_subscriber_link_toggle():
-              self.igmp_join_data_received_during_subscriber_link_toggle(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_join_data_received_during_subscriber_link_toggle)
-        return df
-
-    def igmp_join_data_received_during_channel_distributor_link_toggle(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5', '6.6.6.6']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac1 = '01:00:5e:01:02:03'
-        dst_mac2 = '01:00:5e:02:02:03'
-        src_ip2 = '5.5.5.5'
-        src_ip1 = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        igmpState2 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac1,
-                                     src_ip = src_ip1, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic2 = McastTraffic(groups2, iface= 'veth3', dst_mac = dst_mac2,
-                                     src_ip = src_ip2,  cb = self.send_mcast_cb, arg = igmpState2)
-        mcastTraffic1.start()
-        mcastTraffic2.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        join_state2 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
-        mcastTraffic1.stop()
-        os.system('ifconfig '+'veth2'+' down')
-        os.system('ifconfig '+'veth2')
-        time.sleep(10)
-        self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        os.system('ifconfig '+'veth2'+' up')
-        os.system('ifconfig '+'veth2')
-        time.sleep(10)
-        mcastTraffic1.start()
-        self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state2)
-        mcastTraffic2.stop()
-
-    ##  This test case is failing to receive traffic from multicast data from defferent channel interfaces TO-DO
-    ###### TO DO scenario #######
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
-    def test_igmp_join_data_received_during_channel_distributors_link_toggle(self):
-        df = defer.Deferred()
-        def igmp_join_data_receiving_during_channel_distributor_link_toggle():
-              self.igmp_join_data_received_during_channel_distributor_link_toggle(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_join_data_receiving_during_channel_distributor_link_toggle)
-        return df
-
-    def igmp_invalidClassD_IP_join_packet(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MINVALIDGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_invalid_class_d_ip_for_join_packet(self):
-        df = defer.Deferred()
-        def igmp_invalidClass_D_IP_join_packet():
-              self.igmp_invalidClassD_IP_join_packet(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_invalidClass_D_IP_join_packet)
-        return df
-
-    def igmp_invalidClassD_IP_as_srclistIP_join_packet(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['239.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s when we sent invalid join packet ' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
-    def test_igmp_invalid_class_d_ip_as_srclist_ip_for_join_packet(self):
-        df = defer.Deferred()
-        def igmp_invalidClassD_IP_as_srclistIP_join_packet():
-              self.igmp_invalidClassD_IP_as_srclistIP_join_packet(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_invalidClassD_IP_as_srclistIP_join_packet)
-        return df
-
-    def igmp_general_query_recv_packet(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        ip_src = '1.1.1.1'
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
-        ip_src = self.IP_SRC
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        log_test.info('Started delay to verify multicast data taraffic for group %s is received or not for 180 sec ' %groups2)
-        time.sleep(100)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Verified that  multicast data for group %s is received after 100 sec ' %groups2)
-        time.sleep(50)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Verified that  multicast data for group %s is received after 150 sec ' %groups2)
-        time.sleep(30)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Verified that  multicast data for group %s is received after 180 sec ' %groups2)
-        time.sleep(10)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Verified that  multicast data for group %s is received after 190 sec ' %groups2)
-        target3 = mcastTraffic1.isRecvStopped()
-        assert target3==False, 'EXPECTED FAILURE'
-        log_test.info('Verified that multicast data for a group %s is still transmitting from a data interface' %groups2)
-        log_test.info('Now checking join interface is receiving a multicast data for group %s after 190 sec' %groups2)
-        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target1==1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving multicast data for group %s' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+250)
-    def test_igmp_general_query_received_traffic(self):
-        df = defer.Deferred()
-        def igmp_general_query_recv_packet():
-              self.igmp_general_query_recv_packet(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_general_query_recv_packet)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+100)
-    def test_igmp_query_received_on_joining_interface(self):
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_leave(stateList, leave_groups)
-                self.df.callback(0)
-
-        log_test.info('Sending join packet and expect to receive on general query packet after 60 sec for multicast %s ' %groups)
-        self.send_igmp_join_recvQuery(groups)
-        log_test.info('Received a general query packet for multicast %s group on joing interface and sending traffic' %groups)
-        mcastTraffic.start()
-        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
-        reactor.callLater(0, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
-    def test_igmp_for_periodic_query_received_on_joining_interface(self):
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_leave(stateList, leave_groups)
-                self.df.callback(0)
-
-        self.send_igmp_join_recvQuery(groups,3)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
-    def test_igmp_for_periodic_query_received_and_checking_entry_deleted(self):
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_leave(stateList, leave_groups)
-                self.df.callback(0)
-
-        self.send_igmp_join_recvQuery(groups,3)
-        log_test.info('Received periodic general query packets for multicast %s, now checking entry is deleted from tabel by sending traffic for that group' %groups)
-        mcastTraffic.start()
-        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
-        reactor.callLater(0, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
-    def test_igmp_member_query_interval_and_expiry_for_rejoining_interface(self):
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_leave(stateList, leave_groups)
-                self.df.callback(0)
-
-        self.send_igmp_join_recvQuery(groups,3)
-        log_test.info('Received periodic general query packets for multicast %s, now sending join packet again and verifying traffic for that group is received or not on joining interface' %groups)
-        self.send_igmp_join(groups)
-        mcastTraffic.start()
-        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
-        reactor.callLater(0, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+50)
-    def test_igmp_leave_received_group_and_source_specific_query(self):
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_leave(stateList, leave_groups)
-                self.df.callback(0)
-
-        self.send_igmp_join(groups)
-        self.send_igmp_leave_listening_group_specific_query(leave_groups, delay = 3)
-        return df
-
-    def igmp_change_to_exclude_src_list_check_for_group_source_specific_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
-        time.sleep(10)
-        target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target2 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
-    def test_igmp_change_to_exclude_src_list_and_check_for_group_source_specific_query(self):
-        df = defer.Deferred()
-        def igmp_change_to_exclude_src_list_check_for_group_source_specific_query():
-              self.igmp_change_to_exclude_src_list_check_for_group_source_specific_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_change_to_exclude_src_list_check_for_group_source_specific_query)
-        return df
-
-    def igmp_change_to_include_src_list_check_for_general_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
-                             iface = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
-                                                   (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['6.6.6.6', '5.5.5.5']),
-                                                    intf = self.V_INF1, delay = 2,query_group1 = 'group1', query_group2 = None)
-        time.sleep(10)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s after send Change to include message' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
-    def test_igmp_change_to_include_src_list_and_check_for_general_query(self):
-        df = defer.Deferred()
-        def igmp_change_to_include_src_list_check_for_general_query():
-              self.igmp_change_to_include_src_list_check_for_general_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_change_to_include_src_list_check_for_general_query)
-        return df
-
-    def igmp_allow_new_src_list_check_for_general_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '6.6.6.6'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,                                                                              (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                              intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s after sending join with new source list' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
-    def test_igmp_allow_new_src_list_and_check_for_general_query(self):
-        df = defer.Deferred()
-        def igmp_allow_new_src_list_check_for_general_query():
-              self.igmp_allow_new_src_list_check_for_general_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_allow_new_src_list_check_for_general_query)
-        return df
-
-    def igmp_block_old_src_list_check_for_group_source_specific_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        groups = groups1 + groups2
-        self.igmp_send_joins_different_groups_srclist(groups,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:02:02:03'
-        src_ip = '5.5.5.5'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups2, df = df)
-        IGMPTestState(groups = groups2, df = df)
-        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups2)
-        self.igmp_recv_task(self.V_INF1, groups2, join_state1)
-        log_test.info('Interface is receiving traffic from multicast groups %s' %groups2)
-        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups,
-                                                (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
-                                                intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
-        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
-        assert target2 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s after sending join with block old source list' %groups2)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
-    def test_igmp_block_old_src_list_and_check_for_group_source_specific_query(self):
-        df = defer.Deferred()
-        def igmp_block_old_src_list_check_for_group_source_specific_query():
-              self.igmp_block_old_src_list_check_for_group_source_specific_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_block_old_src_list_check_for_group_source_specific_query)
-        return df
-
-    def igmp_include_to_allow_src_list_check_for_general_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,(['2.2.2.2', '3.3.3.3', '4.4.4.4', '6.6.6.6'], ['2.2.2.2', '5.5.5.5']),                                               intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
-    def test_igmp_include_to_allow_src_list_and_check_for_general_query(self):
-        df = defer.Deferred()
-        def igmp_include_to_allow_src_list_check_for_general_query():
-              self.igmp_include_to_allow_src_list_check_for_general_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_include_to_allow_src_list_check_for_general_query)
-        return df
-
-    def igmp_include_to_block_src_list_check_for_group_source_specific_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
-                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
-                                                      intf = self.V_INF1, delay = 2)
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['6.6.6.6','7.7.7.7'],
-                             iface = self.V_INF1, delay = 2)
-        self.igmp_recv_task(self.V_INF1, groups1, join_state1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
-    def test_igmp_include_to_block_src_list_and_check_for_group_source_specific_query(self):
-        df = defer.Deferred()
-        def igmp_include_to_block_src_list_check_for_group_source_specific_query():
-              self.igmp_include_to_block_src_list_check_for_group_source_specific_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_include_to_block_src_list_check_for_group_source_specific_query)
-        return df
-
-    def igmp_exclude_to_allow_src_list_check_for_general_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        groups2 = (self.MGROUP2,)
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
-                             iface = self.V_INF1, delay = 2)
-
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
-                                             (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']),                                                                 intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
-    def test_igmp_exclude_to_allow_src_list_and_check_for_general_query(self):
-        df = defer.Deferred()
-        def igmp_exclude_to_allow_src_list_check_for_general_query():
-              self.igmp_exclude_to_allow_src_list_check_for_general_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_exclude_to_allow_src_list_check_for_general_query)
-        return df
-
-    def igmp_exclude_to_block_src_list_check_for_group_source_specific_query(self, df = None):
-        groups1 = (self.MGROUP1,)
-        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
-                             iface = self.V_INF1, delay = 2)
-
-        dst_mac = '01:00:5e:01:02:03'
-        src_ip = '2.2.2.2'
-        if df is None:
-              df = defer.Deferred()
-        igmpState1 = IGMPTestState(groups = groups1, df = df)
-        IGMPTestState(groups = groups1, df = df)
-        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
-                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
-        mcastTraffic1.start()
-        join_state1 = IGMPTestState(groups = groups1)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        self.send_igmp_leave_listening_group_specific_query(groups = groups1,
-                                          src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
-                                          iface = self.V_INF1, delay = 2)
-        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
-        assert target1 == 1, 'EXPECTED FAILURE'
-        log_test.info('Interface is not receiving traffic from multicast groups %s' %groups1)
-        mcastTraffic1.stop()
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
-    def test_igmp_exclude_to_block_src_list_and_check_for_group_source_specific_query(self):
-        df = defer.Deferred()
-        def igmp_exclude_to_block_src_list_check_for_group_source_specific_query():
-              self.igmp_exclude_to_block_src_list_check_for_group_source_specific_query(df = df)
-              df.callback(0)
-        reactor.callLater(0, igmp_exclude_to_block_src_list_check_for_group_source_specific_query)
-        return df
-
-    def iptomac(self, mcast_ip):
-        mcast_mac =  '01:00:5e:'
-        octets = mcast_ip.split('.')
-        second_oct = int(octets[1]) & 127
-        third_oct = int(octets[2])
-        fourth_oct = int(octets[3])
-        mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
-        return mcast_mac
-
-    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
-        dst_mac = self.iptomac(group)
-        eth = Ether(dst= dst_mac)
-        ip = IP(dst=group,src=source)
-        data = repr(monotonic.monotonic())
-        sendp(eth/ip/data,count=20, iface = intf)
-
-    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
-        log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
-        self.success = False
-        def recv_task():
-            def igmp_recv_cb(pkt):
-                #log_test.info('received multicast data packet is %s'%pkt.show())
-                log_test.info('multicast data received for group %s from source %s'%(group,source))
-                self.success = True
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
-        t = threading.Thread(target = recv_task)
-        t.start()
-        self.send_multicast_data_traffic(group,source=source)
-        t.join()
-        return self.success
-
-    def test_igmp_include_exclude_modes(self):
-        groups = ['224.2.3.4','230.5.6.7']
-        src_list = ['2.2.2.2','3.3.3.3']
-        self.onos_ssm_table_load(groups, src_list=src_list)
-        self.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        self.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                             iface = self.V_INF1, delay = 2)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True)
-        status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
-        assert_equal(status,False)
-
-    def test_igmp_allow_new_source_mode(self):
-        group = ['224.8.9.3']
-        src_list = ['2.2.2.2','3.3.3.3']
-        #dst_mac = self.iptomac(group[0])
-        self.onos_ssm_table_load(group, src_list)
-        self.send_igmp_join(groups = group, src_list = src_list[0],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(group[0], intf=self.V_INF1,source = src_list[0])
-        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-        self.send_igmp_join(groups = group, src_list = src_list[1],record_type = IGMP_V3_GR_TYPE_ALLOW_NEW,
-                            iface = self.V_INF1, delay = 1)
-        for src in src_list:
-            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
-            assert_equal(status,True) # expecting igmp data traffic from both sources
-
-
-    def test_igmp_include_to_exclude_mode_change(self):
-        group = ['224.2.3.4']
-        src_list = ['2.2.2.2','3.3.3.3']
-        self.onos_ssm_table_load(group, src_list)
-        self.send_igmp_join(groups = group, src_list = src_list[0],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src_list[0])
-        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-        self.send_igmp_join(groups = group, src_list = src_list[1],record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                           iface = self.V_INF1, delay = 1)
-        for src in src_list:
-            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src)
-            assert_equal(status,False) # expecting igmp data traffic from both sources
-
-    def test_igmp_exclude_to_include_mode_change(self):
-        group = ['224.2.3.4']
-        src = ['2.2.2.2']
-        self.onos_ssm_table_load(group, src)
-        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
-        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
-        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source = src[0])
-        assert_equal(status,True) # expecting igmp data traffic from both sources
-
-    #this test case wotks properly if the snooping device(ONOS) have multicast router connected.
-    def test_igmp_to_include_mode_with_null_source(self):
-        groups = ['224.2.3.4','230.7.9.8']
-        src = ['192.168.12.34']
-        dst_mac = []
-        dst_mac.append(self.iptomac(groups[0]))
-        dst_mac.append(self.iptomac(groups[1]))
-        self.onos_ssm_table_load(groups, src)
-        self.send_igmp_join(groups = groups, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        for grp in groups:
-            status = self.verify_igmp_data_traffic(grp,intf=self.V_INF1,source= src[0])
-            assert_equal(status,True) # not expecting igmp data traffic from source src_list[0]
-        #sending leave packet for group groups[1]
-        self.send_igmp_join(groups = [groups[1]], src_list = [],record_type = IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        for grp in groups:
-            status = self.verify_igmp_data_traffic(grp,intf=self.V_INF1,source= src[0])
-            if grp is groups[0]:
-                assert_equal(status,True) # expecting igmp data traffic to group groups[0]
-            else:
-                assert_equal(status,False) # not expecting igmp data traffic to group groups[1]
-
-    def test_igmp_to_include_mode(self):
-        group = ['229.9.3.6']
-        src_list = ['192.168.12.34','192.18.1.34']
-        self.onos_ssm_table_load(group, src_list)
-        self.send_igmp_join(groups = group, src_list = [src_list[0]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src_list[0])
-        assert_equal(status,True) # not expecting igmp data traffic from source src_list[0]
-        self.send_igmp_join(groups = group, src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        for src in src_list:
-            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source= src)
-            assert_equal(status,True) # expecting igmp data traffic to group groups[0]
-
-    #this test case passed only if mulitcast router connected to ONOS.
-    def test_igmp_blocking_old_source_mode(self):
-        group = ['224.2.3.4']
-        src_list = ['2.2.2.2','3.3.3.3']
-        self.onos_ssm_table_load(group, src_list)
-        self.send_igmp_join(groups = group, src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        for src in src_list:
-            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
-            assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-        self.send_igmp_join(groups = group, src_list = [src_list[1]],record_type = IGMP_V3_GR_TYPE_BLOCK_OLD,
-                             iface = self.V_INF1, delay = 1)
-        for src in src_list:
-            status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1, source=src)
-            if src is src_list[0]:
-                assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-            else:
-                assert_equal(status,False) # not expecting igmp data traffic from source src_list[1]
-
-    def test_igmp_multiple_joins_and_data_verification_with_100_groups(self):
-        groups = []
-	sources = []
-	count = 1
-	mcastips = self.mcast_ip_range(start_ip = '226.0.0.1',end_ip = '226.0.5.254')
-	sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.5.254')
-        while count<=100:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-	    if group in groups:
-		pass
-	    else:
-		log_test.info('group and source are %s and %s'%(group,source))
-		groups.append(group)
-                sources.append(source)
-		count += 1
-	self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-	for i in range(100):
-	    self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, True)
-	    log_test.info('data received for group %s from source %s'%(groups[i],sources[i]))
-
-    def test_igmp_multiple_joins_with_data_verification_and_leaving_100_groups(self):
-        groups = []
-        sources = []
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '226.0.0.1',end_ip = '226.0.5.254')
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.5.254')
-        while count<=100:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group and source are %s and %s'%(group,source))
-                groups.append(group)
-                sources.append(source)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-        for i in range(100):
-            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, True)
-            log_test.info('data received for group %s from source %s'%(groups[i],sources[i]))
-	    self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
-                                         iface = self.V_INF1, delay = 1)
-	    status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, False)
-            log_test.info("data not received for group %s from source %s after changing group mode to 'TO-EXCLUDE' mode"%(groups[i],sources[i]))
-
-    def test_igmp_group_source_for_only_config_with_1000_entries(self):
-        groups = []
-        sources = []
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.50.254')
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.50.254')
-        while count<=1000:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group and source are %s and %s'%(group,source))
-                groups.append(group)
-                sources.append(source)
-                count += 1
-	self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-
-    def test_igmp_from_exclude_to_include_mode_with_100_groups(self):
-        groups = []
-        sources = []
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.10.254')
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.10.254')
-        while count<=100:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group and source are %s and %s'%(group,source))
-                groups.append(group)
-                sources.append(source)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-        for i in range(100):
-            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                                         iface = self.V_INF1)
-            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, False)
-	    log_test.info('data not received for group %s from source %s as expected'%(groups[i],sources[i]))
-	    self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-	    status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-	    assert_equal(status, True)
-            log_test.info("data received for group %s from source %s after changing group mode to 'TO-INCLUDE' mode"%(groups[i],sources[i]))
-
-    def test_igmp_with_multiple_joins_and_data_verify_with_1000_groups(self):
-        groups = []
-        sources = []
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = '229.0.30.254')
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = '10.10.30.254')
-        while count<=1000:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group and source are %s and %s'%(group,source))
-                groups.append(group)
-                sources.append(source)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-        for i in range(1000):
-            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, True)
-            log_test.info('data received for group %s from source %s - %d'%(groups[i],sources[i],i))
-
-    def test_igmp_with_multiple_joins_and_data_verify_with_5000_groups(self):
-        groups = []
-        sources = []
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '231.39.19.121',end_ip = '231.40.30.25')
-        sourceips = self.source_ip_range(start_ip = '192.168.56.43',end_ip = '192.169.110.30')
-        while count<=5000:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group and source are %s and %s'%(group,source))
-                groups.append(group)
-                sources.append(source)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-        for i in range(5000):
-            self.send_igmp_join(groups = [groups[i]], src_list = [sources[i]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-            status = self.verify_igmp_data_traffic(groups[i],intf=self.V_INF1,source=sources[i])
-            assert_equal(status, True)
-            log_test.info('data received for group %s from source %s - %d'%(groups[i],sources[i],i))
-
-    """def test_igmp_join_from_multiple_infts(self):
-        groups = ['229.9.3.6','234.20.56.2']
-        src_list = ['192.168.12.34','192.18.1.34']
-        self.onos_ssm_table_load(groups, src_list=src_list)
-        self.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = 'veth0')
-	self.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = 'veth2')
-        status = self.verify_igmp_data_traffic(groups[0],intf='veth0',source=src_list[0])
-	assert_equal(status,True)
-	status = self.verify_igmp_data_traffic(groups[1],intf='veth2',source=src_list[1])
-        assert_equal(status,True) # not expecting igmp data traffic from source src_list[0]
-    """
-
-    def test_igmp_send_data_to_non_registered_group(self):
-        group = ['224.2.3.4']
-        src = ['2.2.2.2']
-        self.onos_ssm_table_load(group,src_list= src)
-        self.send_igmp_join(groups = ['239.0.0.1'], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic('239.0.0.1',intf=self.V_INF1,source=src[0])
-        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
-
-    def test_igmp_traffic_verification_for_registered_group_with_no_join_sent(self):
-        group = ['227.12.3.40']
-        src = ['190.4.19.67']
-        self.onos_ssm_table_load(group,src_list= src)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
-        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
-
-    def test_igmp_toggling_app_activation(self):
-        group = [self.random_mcast_ip()]
-        src = [self.randomsourceip()]
-        self.onos_ssm_table_load(group,src_list= src)
-	self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1)
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
-        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-	log_test.info('Multicast traffic received for group %s from source %s before the app is deactivated'%(group[0],src[0]))
-	self.onos_ctrl.deactivate()
-	status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
-        assert_equal(status,False) #not expecting igmp data traffic from source src_list[0]
-	log_test.info('Multicast traffic not received for group %s from source %s after the app is deactivated'%(group[0],src[0]))
-	self.onos_ctrl.activate()
-        status = self.verify_igmp_data_traffic(group[0],intf=self.V_INF1,source=src[0])
-        assert_equal(status,True) # expecting igmp data traffic from source src_list[0]
-	log_test.info('Multicast traffic received for group %s from source %s the app is re-activated'%(group[0],src[0]))
-
-    def test_igmp_with_mismatch_for_dst_ip_and_mac_in_data_packets(self):
-        group = ['228.18.19.29']
-        source = [self.randomsourceip()]
-        self.onos_ssm_table_load(group,src_list= source)
-	self.send_igmp_join(groups = group, src_list = source,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1)
-        dst_mac = '01:00:5e:0A:12:09'
-        eth = Ether(dst= dst_mac)
-        ip = IP(dst=group[0],src=source[0])
-        data = repr(monotonic.monotonic())
-        pkt = (eth/ip/data)
-        log_test.info('Multicast traffic packet %s'%pkt.show())
-	self.success = False
-        def recv_task():
-            def igmp_recv_cb(pkt):
-                #log_test.info('received multicast data packet is %s'%pkt.show())
-                log_test.info('multicast data received for group %s from source %s'%(group[0],source[0]))
-                self.success = True
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group[0] and p[IP].src == source[0], count=1,timeout = 2, iface='veth0')
-        t = threading.Thread(target = recv_task)
-        t.start()
-        sendp(eth/ip/data,count=20, iface = 'veth2')
-        t.join()
-        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
-
-    #test case failing, ONOS registering unicast ip also as an igmp join
-    def test_igmp_registering_invalid_group(self):
-        groups = ['218.18.19.29']
-        source = [self.randomsourceip()]
-	ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
-	ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
-	for g in groups:
-            for s in source:
-                d = {}
-                d['source'] = s or '0.0.0.0'
-                d['group'] = g
-                ssm_xlate_list.append(d)
-	    log_test.info('onos load config is %s'%ssm_dict)
-            status, code = OnosCtrl.config(ssm_dict)
-        self.send_igmp_join(groups, src_list = source, record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = self.V_INF1, delay = 1)
-        status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1, source=source[0])
-        assert_equal(status,False) # not expecting igmp data traffic from source src_list[0]
-
-    def test_igmp_registering_invalid_source(self):
-        groups = [self.random_mcast_ip()]
-        sources = ['224.10.28.34','193.73.219.257']
-        ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
-        ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
-        for g in groups:
-            for s in sources:
-                d = {}
-                d['source'] = s or '0.0.0.0'
-                d['group'] = g
-                ssm_xlate_list.append(d)
-            log_test.info('onos load config is %s'%ssm_dict)
-            status, code = OnosCtrl.config(ssm_dict)
-            assert_equal(status,False)
diff --git a/src/test/igmpproxy/__init__.py b/src/test/igmpproxy/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/igmpproxy/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/igmpproxy/igmpproxyTest.json b/src/test/igmpproxy/igmpproxyTest.json
deleted file mode 100644
index 1cba515..0000000
--- a/src/test/igmpproxy/igmpproxyTest.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-    "V_INF1" : "veth0",
-    "V_INF2" : "veth1",
-    "MGROUP1" : "239.1.2.3",
-    "MGROUP2" : "239.2.2.3",
-    "MINVALIDGROUP1" : "255.255.255.255",
-    "MINVALIDGROUP2" : "239.255.255.255",
-    "IGMP_DST_MAC" : "01:00:5e:00:00:16",
-    "IGMP_SRC_MAC" : "5a:e1:ac:ec:4d:a1",
-    "IP_SRC" : "1.2.3.4",
-    "IP_DST" : "224.0.0.22",
-    "NEGATIVE_TRAFFIC_STATUS" : 1,
-    "IGMP_TEST_TIMEOUT" : 5,
-    "IGMP_QUERY_TIMEOUT" : 60,
-    "MCAST_TRAFFIC_TIMEOUT" : 10,
-    "PORT_TX_DEFAULT" : 2,
-    "PORT_RX_DEFAULT" : 1,
-    "ROVER_TEST_TIMEOUT" : 300,
-    "ROVER_TIMEOUT" : 200,
-    "ROVER_JOIN_TIMEOUT" : 60
-}
diff --git a/src/test/igmpproxy/igmpproxyTest.py b/src/test/igmpproxy/igmpproxyTest.py
deleted file mode 100644
index b65987b..0000000
--- a/src/test/igmpproxy/igmpproxyTest.py
+++ /dev/null
@@ -1,1625 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from twisted.internet import defer
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from scapy.all import *
-from select import select as socket_select
-import time, monotonic
-import requests
-import os
-import random
-import threading
-from IGMP import *
-from McastTraffic import *
-from Stats import Stats
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from Channels import IgmpChannel
-from CordLogger import CordLogger
-from CordTestConfig import setup_module, teardown_module
-from onosclidriver import OnosCliDriver
-from CordTestUtils import get_mac, get_controller
-from portmaps import g_subscriber_port_map
-from CordTestUtils import log_test
-log_test.setLevel('INFO')
-
-class IGMPProxyTestState:
-
-      def __init__(self, groups = [], df = None, state = 0):
-            self.df = df
-            self.state = state
-            self.counter = 0
-            self.groups = groups
-            self.group_map = {} ##create a send/recv count map
-            for g in groups:
-                self.group_map[g] = (Stats(), Stats())
-
-      def update(self, group, tx = 0, rx = 0, t = 0):
-            self.counter += 1
-            index = 0 if rx == 0 else 1
-            v = tx if rx == 0 else rx
-            if self.group_map.has_key(group):
-                  self.group_map[group][index].update(packets = v, t = t)
-
-      def update_state(self):
-          self.state = self.state ^ 1
-
-class igmpproxy_exchange(CordLogger):
-
-    V_INF1 = 'veth0'
-    MGROUP1 = '239.1.2.3'
-    MGROUP2 = '239.2.2.3'
-    MINVALIDGROUP1 = '255.255.255.255'
-    MINVALIDGROUP2 = '239.255.255.255'
-    MMACGROUP1 = "01:00:5e:01:02:03"
-    MMACGROUP2 = "01:00:5e:02:02:03"
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.0.22'
-    NEGATIVE_TRAFFIC_STATUS = 1
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-    IGMP_TEST_TIMEOUT = 5
-    IGMP_QUERY_TIMEOUT = 60
-    MCAST_TRAFFIC_TIMEOUT = 20
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    max_packets = 100
-    MAX_PORTS = 100
-    proxy_app = 'org.opencord.igmpproxy'
-    mcast_app = 'org.opencord.mcast'
-    cord_config_app = 'org.opencord.config'
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    proxy_device_id = OnosCtrl.get_device_id()
-    controller = get_controller()
-    app_files = [os.path.join(test_path, '..', 'apps/cord-config-3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/olt-app-3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/mcast-1.3.0-SNAPSHOT.oar'), os.path.join(test_path, '..', 'apps/onos-app-igmpproxy-1.1.0-SNAPSHOT.oar')]
-    proxy_config_file = os.path.join(test_path, '..', 'igmpproxy/igmpproxyconfig.json')
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
-    ROVER_TEST_TIMEOUT = 300 #3600*86
-    ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
-    ROVER_JOIN_TIMEOUT = 60
-    VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-    configs = {}
-    proxy_interfaces_last = ()
-    interface_to_mac_map = {}
-    host_ip_map = {}
-    MAX_PORTS = 100
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.VOLTHA_ENABLED is False:
-            OnosCtrl.config_device_driver()
-            OnosCtrl.cord_olt_config(cls.olt)
-        time.sleep(2)
-	cls.uninstall_cord_config_app()
-	time.sleep(2)
-	cls.install_igmpproxy()
-	cls.igmp_proxy_setup()
-
-    @classmethod
-    def tearDownClass(cls):
-        if cls.VOLTHA_ENABLED is False:
-            OnosCtrl.config_device_driver(driver = 'ovs')
-	#cls.igmp_proxy_cleanup()
-
-    def setUp(self):
-        ''' Activate the igmp proxy app'''
-        super(igmpproxy_exchange, self).setUp()
-        self.igmp_channel = IgmpChannel()
-
-    def tearDown(self):
-	super(igmpproxy_exchange, self).tearDown()
-
-    @classmethod
-    def uninstall_cord_config_app(cls):
-        log_test.info('Uninstalling org.opencord.config 1.2 version app')
-        OnosCtrl(cls.cord_config_app).deactivate()
-        OnosCtrl.uninstall_app(cls.cord_config_app, onos_ip = cls.controller)
-
-    @classmethod
-    def install_igmpproxy(cls):
-        for app in cls.app_files:
-            OnosCtrl.install_app(app, onos_ip = cls.controller)
-	    OnosCtrl(app).activate()
-
-    @classmethod
-    def igmp_proxy_setup(cls):
-        did =  OnosCtrl.get_device_id()
-        cls.proxy_device_id = did
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        #log_test.info('port map is %s'%cls.port_map)
-        if cls.port_map:
-            ##Per subscriber, we use 1 relay port
-            try:
-                proxy_port = cls.port_map[cls.port_map['relay_ports'][0]]
-            except:
-                proxy_port = cls.port_map['uplink']
-            cls.proxy_interface_port = proxy_port
-            cls.proxy_interfaces = (cls.port_map[cls.proxy_interface_port],)
-        else:
-            cls.proxy_interface_port = 100
-            cls.proxy_interfaces = (g_subscriber_port_map[cls.proxy_interface_port],)
-        cls.proxy_interfaces_last = cls.proxy_interfaces
-        if cls.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in cls.port_map['ports']:
-                port_num = cls.port_map[port]
-                if port_num == cls.port_map['uplink']:
-                    continue
-                ip = cls.get_host_ip(port_num)
-                mac = cls.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure igmp proxy  virtual interface
-            proxy_ip = cls.get_host_ip(interface_list[0][0])
-            proxy_mac = cls.get_mac(cls.port_map[cls.proxy_interface_port])
-            interface_list.append((cls.proxy_interface_port, proxy_ip, proxy_mac))
-            cls.onos_interface_load(interface_list)
-
-    @classmethod
-    def igmp_proxy_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        for config in cls.configs.items():
-            OnosCtrl.delete(config)
-        # if cls.onos_restartable is True:
-        #     log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
-        #     return cord_test_onos_restart(config = {})
-
-    @classmethod
-    def onos_load_config(cls, config):
-        #log_test.info('onos load config is %s'%config)
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-    @classmethod
-    def onos_interface_load(cls, interface_list):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(cls.proxy_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        #cls.onos_load_config(interface_dict)
-        cls.configs['interface_config'] = interface_dict
-
-    @classmethod
-    def onos_igmp_proxy_config_load(cls, FastLeave = "false"):
-	#cls.proxy_interface_port = 12
-        proxy_connect_point = '{}/{}'.format(cls.proxy_device_id, cls.proxy_interface_port)
-        log_test.info('\nRelay interface port is %s'%cls.proxy_interface_port)
-        log_test.info('\nRelay interface is %s'%cls.port_map[cls.proxy_interface_port])
-        log_test.info('\nConnect point is %s'%proxy_connect_point)
-	cls.onos_load_config(cls.proxy_config_file,json_file=True)
-        igmpproxy_dict = { "apps": {
-                "org.onosproject.provider.lldp": {
-                        "suppression": {
-                                "deviceTypes": ["ROADM"],
-                                "annotation": "{\"no-lldp\":null}"
-                        }
-                },
-                "org.opencord.igmpproxy": {
-                        "igmpproxy": {
-                                "globalConnectPointMode": "true",
-                                "globalConnectPoint": proxy_connect_point,
-                                "UnsolicitedTimeOut": "2",
-                                "MaxResp": "10",
-                                "KeepAliveInterval": "120",
-                                "KeepAliveCount": "3",
-                                "LastQueryInterval": "2",
-                                "LastQueryCount": "2",
-                                "FastLeave": FastLeave,
-                                "PeriodicQuery": "true",
-                                "IgmpCos": "7",
-                                "withRAUpLink": "true",
-                                "withRADownLink": "true"
-                        }
-                },
-                "org.opencord.mcast": {
-                        "multicast": {
-                                "ingressVlan": "222",
-                                "egressVlan": "17"
-                        }
-                }
-           }
-	}
-
-	"""igmpproxy_dict = {'apps':{
-				'org.opencord.igmpproxy':{
-						'igmpproxy':
-                                                        {'globalConnectPointMode': 'true',
-                                                        'globalConnectPoint': proxy_connect_point,
-                                                        'UnsolicitedTimeOut': '2',
-                                                        'MaxResp': '10',
-                                                        'KeepAliveInterval': '120',
-                                                        'KeepAliveCount': '3',
-                                                        'LastQueryInterval': '2',
-                                                        'LastQueryCount': '2',
-                                                        'FastLeave': 'false',
-                                                        'PeriodicQuery': 'true',
-                                                        'IgmpCos': '7',
-                                                        'withRAUpLink': 'true',
-                                                        'withRADownLink': 'true'
-                                                        }
-                                                      },
-				 'org.opencord.mcast':{
-                                           'ingressVlan': '222',
-                                            'egressVlan': '17'
-                                        },
-                                    }
-				}"""
-	device_dict = {'devices':{
-                           cls.proxy_device_id: {
-                               'basic': {
-                                   'driver': 'default'
-                                },
-                                'accessDevice': {
-                                   'uplink': '2',
-                                   'vlan': '222',
-                                   'defaultVlan': '1'
-                                   }
-                                }
-			    }
-		      }
-	log_test.info('Igmp proxy dict is %s'%igmpproxy_dict)
-        cls.onos_load_config(igmpproxy_dict)
-	cls.onos_load_config(device_dict)
-        cls.configs['relay_config'] = igmpproxy_dict
-	cls.configs['device_config'] = device_dict
-
-    @classmethod
-    def get_host_ip(cls, port):
-        if cls.host_ip_map.has_key(port):
-            return cls.host_ip_map[port]
-        cls.host_ip_map[port] = '192.168.1.{}'.format(port)
-        return cls.host_ip_map[port]
-
-    @classmethod
-    def host_load(cls, iface):
-        '''Have ONOS discover the hosts for dhcp-relay responses'''
-        port = g_subscriber_port_map[iface]
-        host = '173.17.1.{}'.format(port)
-        cmds = ( 'ifconfig {} 0'.format(iface),
-                 'ifconfig {0} {1}'.format(iface, host),
-                 'arping -I {0} {1} -c 2'.format(iface, host),)
-                 #'ifconfig {} 0'.format(iface), )
-        for c in cmds:
-	    log_test.info('Host load config command %s'%c)
-            os.system(c)
-
-    @classmethod
-    def host_config_load(cls, host_config = None):
-        for host in host_config:
-            status, code = OnosCtrl.host_config(host)
-            if status is False:
-                log_test.info('JSON request returned status %d' %code)
-                assert_equal(status, True)
-
-    @classmethod
-    def generate_host_config(cls,ip,mac):
-        num = 0
-        hosts_dict = {}
-	hosts_list = [(ip,mac),]
-        for host, mac in hosts_list:
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.proxy_device_id), 'port': port}}
-            num += 1
-        return hosts_dict.values()
-
-
-    @classmethod
-    def get_mac(cls, iface):
-        if cls.interface_to_mac_map.has_key(iface):
-            return cls.interface_to_mac_map[iface]
-        mac = get_mac(iface, pad = 0)
-        cls.interface_to_mac_map[iface] = mac
-        return mac
-
-    def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
-          ssm_dict = {'apps' : { 'org.opencord.igmpproxy' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.opencord.igmpproxy']['ssmTranslate']
-	  if flag: #to maintain seperate group-source pair.
-	      for i in range(len(groups)):
-		  d = {}
-		  d['source'] = src_list[i] or '0.0.0.0'
-		  d['group'] = groups[i]
-		  ssm_xlate_list.append(d)
-	  else:
-              for g in groups:
-                  for s in src_list:
-                      d = {}
-                      d['source'] = s or '0.0.0.0'
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-	  log_test.info('ONOS ssm table config dictionary is %s'%ssm_dict)
-          self.onos_load_config(ssm_dict)
-          cord_port_map = {}
-          for g in groups:
-                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
-          self.igmp_channel.cord_port_table_load(cord_port_map)
-          time.sleep(2)
-
-    def random_mcast_ip(self,start_ip = '224.1.1.1', end_ip = '224.1.254.254'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-
-    def get_igmp_intf(self):
-        inst = os.getenv('TEST_INSTANCE', None)
-        if not inst:
-            return 'veth0'
-        inst = int(inst) + 1
-        if inst >= self.port_map['uplink']:
-            inst += 1
-        if self.port_map.has_key(inst):
-              return self.port_map[inst]
-        return 'veth0'
-
-    def igmp_verify_join(self, igmpStateList):
-        sendState, recvState = igmpStateList
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            tx = tx_stats.count
-            assert_greater(tx, 0)
-            rx_stats = recvState.group_map[g][1]
-            rx = rx_stats.count
-            assert_greater(rx, 0)
-            log_test.info('Receive stats %s for group %s' %(rx_stats, g))
-
-        log_test.info('IGMP test verification success')
-
-    def igmp_verify_leave(self, igmpStateList, leave_groups):
-        sendState, recvState = igmpStateList[0], igmpStateList[1]
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            rx_stats = recvState.group_map[g][1]
-            tx = tx_stats.count
-            rx = rx_stats.count
-            assert_greater(tx, 0)
-            if g not in leave_groups:
-                log_test.info('Received %d packets for group %s' %(rx, g))
-        for g in leave_groups:
-            rx = recvState.group_map[g][1].count
-            assert_equal(rx, 0)
-
-        log_test.info('IGMP test verification success')
-
-    def mcast_traffic_timer(self):
-          log_test.info('MCAST traffic timer expiry')
-          self.mcastTraffic.stopReceives()
-
-    def send_mcast_cb(self, send_state):
-        for g in send_state.groups:
-            send_state.update(g, tx = 1)
-        return 0
-
-    ##Runs in the context of twisted reactor thread
-    def igmp_recv(self, igmpState):
-        s = socket_select([self.recv_socket], [], [], 1.0)
-        if self.recv_socket in s[0]:
-              p = self.recv_socket.recv()
-              try:
-                    send_time = float(p.payload.load)
-                    recv_time = monotonic.monotonic()
-              except:
-                    log_test.info('Unexpected Payload received: %s' %p.payload.load)
-                    return 0
-              #log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
-              igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
-        return 0
-
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-	#log_test.info('sending igmp join packet %s'%pkt.show())
-        sendp(pkt, iface=iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-        self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
-              gr.sources = src_list
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        if rec_queryCount == None:
-            log_test.info('Sending IGMP join for group %s and waiting for one query packet and printing the packet' %groups)
-            resp = srp1(pkt, iface=iface)
-        else:
-            log_test.info('Sending IGMP join for group %s and waiting for periodic query packets and printing one packet' %groups)
-            resp = srp1(pkt, iface=iface)
-#       resp = srp1(pkt, iface=iface) if rec_queryCount else srp3(pkt, iface=iface)
-        resp[0].summary()
-        log_test.info('Sent IGMP join for group %s and received a query packet and  printing packet' %groups)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_leave(self, groups, src_list = [], ip_pkt = None, iface = 'veth0', delay = 2):
-	log_test.info('entering into igmp leave function')
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              #gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-	log_test.info('igmp leave packet is %s'%pkt.show())
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface = iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def verify_igmp_packets_on_proxy_interface(self,ip_dst=None,iface=None,count=1,positive_test = True):
-	log_test.info('positive test variable inside verify_igmp_packets_on_proxy_interface function is %s'%positive_test)
-	if not iface:
-		iface = self.proxy_interfaces[0]
-	if not ip_dst:
-		ip_dst = self.IP_DST
-        self.status = False if positive_test is True else True
-	#log_test.info('self.status is %s'%self.status)
-	try:
-	    def igmp_recv_cb(pkt):
-                log_test.info('igmp packet received on proxy interface %s'%pkt.show())
-                #log_test.info('igmp packet received on proxy interface %s'%pkt[Raw].show())
-                self.status = True if positive_test is True else False
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].proto == 2 and p[IP].dst==ip_dst, count=count, timeout = 5, iface=iface)
-	    log_test.info('self.status is %s'%self.status)
-            #assert_equal(self.status, True)
-	except Exception as error:
-	    log_test.info('Got Unexpected error %s'%error)
-	    raise
-        #assert_equal(self.status, True)
-
-    @deferred(30)
-    def test_igmpproxy_app_installation(self):
-        df = defer.Deferred()
-        def proxy_app_install(df):
-            self.uninstall_cord_config_app()
-	    auth = ('karaf','karaf')
-	    url = 'http://%s:8181/onos/v1/applications'.format(self.controller)
-	    for file in self.app_files:
-                with open(file, 'rb') as payload:
-                     res = requests.post(url,auth=auth,data=payload)
-                     assert_equal(res.ok, True)
-	    df.callback(0)
-        reactor.callLater(0, proxy_app_install, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_app_netcfg(self):
-        df = defer.Deferred()
-        def proxy_net_config(df):
-            auth = ('karaf','karaf')
-            net_cfg_url = 'http://172.17.0.2:8181/onos/v1/network/configuration/'.format(self.controller)
-            with open(self.proxy_config_file, 'rb') as payload:
-                 res = requests.post(net_cfg_url,auth=auth,data=payload)
-                 ssert_equal(res.ok, True)
-            df.callback(0)
-        reactor.callLater(0, proxy_net_config, df)
-        return df
-
-    @deferred(15)
-    def test_igmpproxy_for_first_join(self,iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-   	    self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-	    try:
-                t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                t.start()
-	        self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                t.join()
-	        assert_equal(self.status, True)
-	    except Exception as error:
-		log_test.info('Igmp packet sent from subscriber interface, not received on proxy interface %s'%error)
-		raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(20)
-    def test_igmpproxy_for_two_joins_with_different_igmp_groups(self,iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-    	    for group in groups:
-	        try:
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-		except Exception as error:
-                    log_test.info('Igmp packet sent from subscriber interface, not received on proxy interface %s'%error)
-                    raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_for_igmp_join_with_proxy_app_deactivation(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-	    try:
-		for group in groups:
-		    positive_test = True if group is groups[0] else False
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface,kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = [groups[0]], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-		    OnosCtrl(self.proxy_app).deactivate()
-		    time.sleep(1)
-            except Exception as error:
-                log_test.info('Igmp packet sent from subscriber interface, not received on proxy interface %s'%error)
-                raise
-	    OnosCtrl(self.proxy_app).activate()
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_for_igmp_join_with_mcast_app_deactivation(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            try:
-                for group in groups:
-                    positive_test = True if group is groups[0] else False
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface,kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-                    OnosCtrl(self.mcast_app).deactivate()
-                    time.sleep(1)
-            except Exception as error:
-                log_test.info('Igmp packet sent from subscriber interface, not received on proxy interface %s'%error)
-                raise
-	    OnosCtrl(self.mcast_app).activate()
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(20)
-    def test_igmpproxy_for_igmp_joins_on_non_proxy_interface(self, iface='veth0', non_proxy_iface='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-	    try:
-                t1 = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                t2 = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface,kwargs = {'iface':non_proxy_iface,'positive_test':False})
-                t1.start()
-                t2.start()
-                self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                         iface = iface)
-                t1.join()
-		assert_equal(self.status, True)
-                t2.join()
-		assert_equal(self.status, True)
-            except Exception as error:
-                log_test.info('Igmp packet sent from subscriber interface, not received on proxy interface %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(25)
-    def test_igmpproxy_sending_group_specific_query_receiving_igmp_leave(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-	    try:
-                self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                     iface = iface)
-		time.sleep(1)
-		t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'ip_dst':group[0], 'iface':iface})
-	        t.start()
-	        self.send_igmp_leave(group, src_list= [], delay=10, iface = iface)
-	        t.join()
-		assert_equal(self.status, True)
-            except Exception as error:
-                log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(40)
-    def test_igmpproxy_verifying_group_specific_query_when_two_subscribers_leave_same_multicast_group_one_after_other(self,iface1='veth0',iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-            try:
-                self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                          delay=1,iface = iface1)
-                self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                         delay=1,iface = iface2)
-                for iface in [iface1, iface2]:
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'ip_dst':group[0], 'iface':iface})
-                    t.start()
-                    time.sleep(1)
-                    self.send_igmp_leave(group, src_list= [], delay=10, iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-            except Exception as error:
-                log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(60)
-    def test_igmpproxy_verifying_group_specific_query_sent_for_all_the_groups_after_subscriber_leaves(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip(), self.random_mcast_ip(), self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            try:
-		self.send_igmp_join(groups = groups, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                          delay=1,iface = iface)
-		threads = []
-		for group in groups:
-                    threads.append(threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'ip_dst':group, 'iface':iface, 'count':len(groups)}))
-                for thread in threads:
-		    thread.start()
-                time.sleep(1)
-                self.send_igmp_leave(groups, src_list= [], delay=11, iface = iface)
-		for thread in threads:
-                    thread.join()
-                    assert_equal(self.status, True)
-            except Exception as error:
-                log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(25)
-    def test_igmpproxy_fast_leave(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load(FastLeave='true')
-            self.onos_ssm_table_load(group,src_list=src)
-            try:
-                self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                     iface = iface)
-                time.sleep(1)
-                t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'positive_test':False, 'ip_dst':group[0], 'iface':iface})
-                t.start()
-                self.send_igmp_leave(group, src_list= [], delay=10, iface = iface)
-                t.join()
-                assert_equal(self.status, True)
-            except Exception as error:
-                log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_for_igmp_join_for_same_group_with_different_source(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            sources = [self.randomsourceip(),self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=sources)
-	    try:
-                for source in sources:
-                    positive_test = True if source is sources[0] else False
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface,kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = group, src_list = source, record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-                    time.sleep(1)
-            except:
-		log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(20)
-    def test_igmpproxy_after_proxy_interface_toggles(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = self.random_mcast_ip()
-	    group2 = self.random_mcast_ip()
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load([group,group2],src_list=src)
-            for toggle in ['Up','Down']:
-                if toggle == 'Down':
-                    log_test.info('Toggling proxy interface ')
-                    os.system('ifconfig {} down'.format(self.proxy_interfaces[0]))
-		    time.sleep(1)
-                    os.system('ifconfig {} up'.format(self.proxy_interfaces[0]))
-		    time.sleep(1)
-		    group = group2
-		try:
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-		except:
-		    log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-		    raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(20)
-    def test_igmpproxy_after_subscriber_interface_toggles(self,iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = self.random_mcast_ip()
-            group2 = self.random_mcast_ip()
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load([group,group2],src_list=src)
-            for toggle in ['Up','Down']:
-                if toggle == 'Down':
-                    log_test.info('Toggling subscriber interface ')
-                    os.system('ifconfig {} down'.format(iface))
-                    time.sleep(1)
-                    os.system('ifconfig {} up'.format(iface))
-                    time.sleep(1)
-                    group = group2
-                try:
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                             iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-                except:
-		    log_test.info('Igmp query not received on subscriber interface in response to leave sent %s'%error)
-                    raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(20)
-    def test_igmpproxy_with_join_and_verify_traffic(self):
-        group = [self.random_mcast_ip()]
-        src = [self.randomsourceip()]
-        self.onos_igmp_proxy_config_load()
-        self.onos_ssm_table_load(group,src_list=src)
-        df = defer.Deferred()
-        igmpState = IGMPProxyTestState(groups = group, df = df)
-        igmpStateRecv = IGMPProxyTestState(groups = group, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb, arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = rx_intf, type = ETH_P_IP)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                #log_test.info('Sending IGMP leave for groups: %s' %groups)
-                self.send_igmp_leave(group , iface = rx_intf, delay = 2)
-                self.recv_socket.close()
-                self.igmp_verify_join(stateList)
-                self.df.callback(0)
-        self.send_igmp_join(group, iface = rx_intf)
-        mcastTraffic.start()
-        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
-        reactor.callLater(0, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(50)
-    def test_igmpproxy_with_two_subscribers_joining_same_igmp_group_verifying_traffic(self, iface1='veth0', iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-            igmpState = IGMPProxyTestState(groups = group, df = df)
-            IGMPProxyTestState(groups = group, df = df)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                   arg = igmpState)
-            mcastTraffic.start()
-            time.sleep(1)
-            join_state = IGMPProxyTestState(groups = group)
-	    try:
-		for iface in [iface1, iface2]:
-		    positive_test = True if iface is iface1 else False
-	            log_test.info('iface is %s'%iface)
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              iface = iface)
-                    t.join()
-		    assert_equal(self.status, True)
-                    status = self.igmp_recv_task(iface, group, join_state)
-            except Exception as error:
-		log_test.info('Got some unexpected error %s'%error)
-                raise
-	    mcastTraffic.stop()
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_with_two_subscribers_joining_different_igmp_group_verifying_traffic(self, iface1='veth0', iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-	    try:
-		for group in groups:
-                    igmpState = IGMPProxyTestState(groups = [group], df = df)
-                    IGMPProxyTestState(groups = [group], df = df)
-                    mcastTraffic = McastTraffic([group], iface= tx_intf, cb = self.send_mcast_cb,
-                                   arg = igmpState)
-                    mcastTraffic.start()
-                    time.sleep(1)
-                    join_state = IGMPProxyTestState(groups = [group])
-		    iface = iface1 if group is groups[0] else iface2
-		    log_test.info('iface is %s and group is %s'%(iface,group))
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-		    status = self.igmp_recv_task(iface, [group], join_state)
-		    mcastTraffic.stop()
-            except:
-		log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_with_leave_and_verify_traffic(self):
-        group = [self.random_mcast_ip()]
-	self.onos_igmp_proxy_config_load()
-	self.onos_ssm_table_load(group)
-        df = defer.Deferred()
-        igmpState = IGMPProxyTestState(groups = group, df = df)
-        IGMPProxyTestState(groups = group, df = df)
-        tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                    arg = igmpState)
-	mcastTraffic.start()
-        t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-        t.start()
-	self.send_igmp_join(group, iface = rx_intf,delay=1)
-        t.join()
-        assert_equal(self.status, True)
-        join_state = IGMPProxyTestState(groups = group)
-        status = self.igmp_recv_task(rx_intf, group, join_state)
-	self.send_igmp_leave(group, delay = 10, iface = rx_intf)
-	join_state = IGMPProxyTestState(groups = group)
-	status = self.igmp_not_recv_task(rx_intf, group, join_state)
-	log_test.info('verified status for igmp recv task %s'%status)
-	assert status == 1 , 'EXPECTED RESULT'
-	df.callback(0)
-        return df
-
-    @deferred(30)
-    def test_igmpproxy_data_traffic_for_non_joined_group(self):
-        groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-        src = [self.randomsourceip()]
-        self.onos_igmp_proxy_config_load()
-        self.onos_ssm_table_load(groups,src_list=src)
-        df = defer.Deferred()
-        igmpState = IGMPProxyTestState(groups = groups, df = df)
-        IGMPProxyTestState(groups = groups, df = df)
-        tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        mcastTraffic = McastTraffic(groups, iface= tx_intf, cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        mcastTraffic.start()
-        t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-        t.start()
-        self.send_igmp_join([groups[0]],src_list= src, iface = rx_intf,delay=1)
-        t.join()
-        assert_equal(self.status, True)
-        join_state = IGMPProxyTestState(groups = [groups[0]])
-        status = self.igmp_recv_task(rx_intf, [groups[0]], join_state)
-        join_state = IGMPProxyTestState(groups = [groups[1]])
-        status = self.igmp_not_recv_task(rx_intf, [groups[1]], join_state)
-        log_test.info('verified status for igmp recv task %s'%status)
-	mcastTraffic.stop()
-        assert status == 1 , 'EXPECTED RESULT'
-        df.callback(0)
-        return df
-
-    #fail
-    @deferred(timeout=60)
-    def test_igmpproxy_with_leave_and_join_loop(self):
-        self.groups = ['226.0.1.1', '227.0.0.1', '228.0.0.1', '229.0.0.1', '230.0.0.1' ]
-        self.src_list = ['3.4.5.6', '7.8.9.10']
-	self.onos_igmp_proxy_config_load()
-	self.onos_ssm_table_load(self.groups,src_list=self.src_list)
-        df = defer.Deferred()
-        #self.df = df
-        self.iterations = 0
-        self.num_groups = len(self.groups)
-        self.MAX_TEST_ITERATIONS = 3
-        rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-	self.send_igmp_leave(self.groups,src_list = [], iface=rx_intf,delay=5)
-
-        def igmp_srp_task(v):
-              if self.iterations < self.MAX_TEST_ITERATIONS:
-                    if v == 1:
-                          ##join test
-                          self.num_groups = random.randint(0, len(self.groups))
-			  log_test.info('self.num_groups var is %s'%self.num_groups)
-			  try:
-			      for group in self.groups[:self.num_groups]:
-                                  t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                                  t.start()
-                                  self.send_igmp_join(group,src_list = self.src_list,
-                                              iface = rx_intf, delay = 1)
-			          t.join()
-				  assert_equal(self.status, True)
-			  except:
-				log_test.info('Got some unexpected error %s'%error)
-			        raise
-                    else:
-                          self.send_igmp_leave(self.groups[:self.num_groups],
-                                               src_list = [],
-                                               iface = rx_intf, delay = 10)
-                    self.iterations += 1
-                    v ^= 1
-                    reactor.callLater(1.0 + 0.5*self.num_groups,
-                                      igmp_srp_task, v)
-              else:
-                    df.callback(0)
-        reactor.callLater(0, igmp_srp_task, 1)
-        return df
-
-    def igmp_join_task(self, intf, groups, state, src_list = ['1.2.3.4']):
-          self.onos_ssm_table_load(groups, src_list)
-          igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                        gaddr=self.IP_DST)
-          for g in groups:
-                gr = IGMPv3gr(rtype = IGMP_V3_GR_TYPE_INCLUDE, mcaddr = g)
-                gr.sources = src_list
-                igmp.grps.append(gr)
-
-          for g in groups:
-                state.group_map[g][0].update(1, t = monotonic.monotonic())
-
-          pkt = self.igmp_eth/self.igmp_ip/igmp
-          IGMPv3.fixup(pkt)
-          sendp(pkt, iface=intf)
-          log_test.debug('Returning from join task')
-
-    def igmp_recv_task(self, intf, groups, join_state):
-          recv_socket = L3PacketSocket(iface = intf, type = ETH_P_IP)
-          group_map = {}
-          for g in groups:
-                group_map[g] = [0,0]
-
-          log_test.info('Verifying join interface %s should receive multicast data'%intf)
-          while True:
-                p = recv_socket.recv()
-                if p.dst in groups and group_map[p.dst][0] == 0:
-                      group_map[p.dst][0] += 1
-                      group_map[p.dst][1] = monotonic.monotonic()
-                      c = 0
-                      for g in groups:
-                            c += group_map[g][0]
-                      if c == len(groups):
-                            break
-          for g in groups:
-                join_start = join_state.group_map[g][0].start
-                recv_time = group_map[g][1] * 1000000
-                delta = (recv_time - join_start)
-                log_test.info('Join for group %s received in %.3f usecs' %
-                         (g, delta))
-
-          recv_socket.close()
-          log_test.debug('Returning from recv task')
-
-    def igmp_not_recv_task(self, intf, groups, join_state):
-	  log_test.info('Entering igmp not recv task loop')
-          recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
-          group_map = {}
-          for g in groups:
-                group_map[g] = [0,0]
-
-          log_test.info('Verifying join interface, should not receive any multicast data')
-          self.NEGATIVE_TRAFFIC_STATUS = 1
-          def igmp_recv_cb(pkt):
-                log_test.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
-                self.NEGATIVE_TRAFFIC_STATUS = 2
-          sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: IP in p and p[IP].dst in groups,
-                timeout = 3, opened_socket = recv_socket)
-          recv_socket.close()
-          return self.NEGATIVE_TRAFFIC_STATUS
-
-    def group_latency_check(self, groups):
-          tasks = []
-          self.send_igmp_leave(groups = groups,delay=10)
-          join_state = IGMPProxyTestState(groups = groups)
-          tasks.append(threading.Thread(target=self.igmp_join_task, args = ('veth0', groups, join_state,)))
-          traffic_state = IGMPProxyTestState(groups = groups)
-          mcast_traffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                       arg = traffic_state)
-          mcast_traffic.start()
-          tasks.append(threading.Thread(target=self.igmp_recv_task, args = ('veth0', groups, join_state)))
-          for t in tasks:
-                t.start()
-          for t in tasks:
-                t.join()
-
-          mcast_traffic.stop()
-          self.send_igmp_leave(groups = groups)
-          return
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmpproxy_with_1group_join_latency(self):
-        groups = [self.random_mcast_ip()]
-        df = defer.Deferred()
-        def igmp_1group_join_latency():
-              self.group_latency_check(groups)
-              df.callback(0)
-        reactor.callLater(0, igmp_1group_join_latency)
-        return df
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
-    def test_igmpproxy_with_2group_join_latency(self):
-        groups = [self.MGROUP1, self.MGROUP1]
-        df = defer.Deferred()
-        def igmp_2group_join_latency():
-            self.group_latency_check(groups)
-            df.callback(0)
-        reactor.callLater(0, igmp_2group_join_latency)
-        return df
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 100)
-    def test_igmpproxy_with_Ngroup_join_latency(self):
-        groups = ['239.0.1.1', '240.0.1.1', '241.0.1.1', '242.0.1.1']
-        df = defer.Deferred()
-        def igmp_Ngroup_join_latency():
-            self.group_latency_check(groups)
-            df.callback(0)
-        reactor.callLater(0, igmp_Ngroup_join_latency)
-        return df
-
-    @deferred(70)
-    def test_igmpproxy_with_join_rover_all(self,iface='veth0'):
-	self.onos_igmp_proxy_config_load()
-	df = defer.Deferred()
-	def igmp_proxy_join_rover():
-              s = (224 << 16) | 1
-              #e = (225 << 24) | (255 << 16) | (255 << 16) | 255
-              e = (224 << 16) | 10
-              for i in xrange(s, e+1):
-                  if i&0xff:
-                      ip = '%d.%d.%d.%d'%((i>>16)&0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
-		  try:
-                      t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                      t.start()
-                      self.send_igmp_join(groups = [ip], ssm_load=True, iface = iface, delay=1)
-                      t.join()
-		  except:
-		      raise
-              df.callback(0)
-        reactor.callLater(0, igmp_proxy_join_rover)
-        return df
-
-    @deferred(timeout=ROVER_TEST_TIMEOUT)
-    def test_igmpproxy_with_join_rover(self):
-          df = defer.Deferred()
-          iface = self.get_igmp_intf()
-          self.df = df
-          self.count = 0
-          self.timeout = 0
-          self.complete = False
-          def igmp_join_timer():
-                self.timeout += self.ROVER_JOIN_TIMEOUT
-                log_test.info('IGMP joins sent: %d' %self.count)
-                if self.timeout >= self.ROVER_TIMEOUT:
-                      self.complete = True
-                reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
-
-          reactor.callLater(self.ROVER_JOIN_TIMEOUT, igmp_join_timer)
-          self.start_channel = (224 << 24) | 1
-          self.end_channel = (224 << 24) | 200 #(225 << 24) | (255 << 16) | (255 << 16) | 255
-          self.current_channel = self.start_channel
-          def igmp_join_rover(self):
-                #e = (224 << 24) | 10
-                chan = self.current_channel
-                self.current_channel += 1
-                if self.current_channel >= self.end_channel:
-                      chan = self.current_channel = self.start_channel
-                if chan&0xff:
-                      ip = '%d.%d.%d.%d'%((chan>>24)&0xff, (chan>>16)&0xff, (chan>>8)&0xff, chan&0xff)
-                      self.send_igmp_join([ip], delay = 0, ssm_load = False, iface = iface)
-                      self.count += 1
-                if self.complete == True:
-                      log_test.info('%d IGMP joins sent in %d seconds over %s' %(self.count, self.timeout, iface))
-                      self.df.callback(0)
-                else:
-                      reactor.callLater(0, igmp_join_rover, self)
-          reactor.callLater(0, igmp_join_rover, self)
-          return df
-
-    #fail
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 30)
-    def test_igmpproxy_sends_periodic_general_query_on_subscriber_connected_segment(self,iface='veth0'):
-	groups = [self.random_mcast_ip()]
-	self.onos_igmp_proxy_config_load()
-	self.onos_ssm_table_load(groups)
-	self.send_igmp_join(groups)
-	self.success = False
-        df = defer.Deferred()
-        def igmp_query_timeout():
-              def igmp_query_cb(pkt):
-		    log_test.info('received igmp query packet is %s'%pkt.show())
-		    self.success = True
-              sniff(prn = igmp_query_cb, count=1, lfilter = lambda p: IP in p and p[IP].proto == 2 and p[IP].dst == '224.0.0.1',
-	                               timeout = self.IGMP_QUERY_TIMEOUT+2, iface = iface)
-              df.callback(0)
-        self.send_igmp_join(groups)
-        self.test_timer = reactor.callLater(0,igmp_query_timeout)
-	assert_equal(self.success, True)
-        return df
-
-
-    @deferred(timeout=IGMP_QUERY_TIMEOUT + 30)
-    def test_igmpproxy_with_not_sending_periodic_general_query_on_proxy_connected_interface(self):
-        groups = [self.random_mcast_ip()]
-        self.onos_igmp_proxy_config_load()
-        self.onos_ssm_table_load(groups)
-        self.send_igmp_join(groups)
-	self.success = False
-        df = defer.Deferred()
-        def igmp_query_timeout():
-              def igmp_query_cb(pkt):
-                    log_test.info('received igmp query packet on proxy connected interface %s'%pkt.show())
-		    self.success = True
-              sniff(prn = igmp_query_cb, count=1, lfilter = lambda p: IP in p and p[IP].proto == 2 and p[IP].dst == '224.0.0.1',
-                                       timeout = self.IGMP_QUERY_TIMEOUT+2, iface = self.proxy_interfaces[0])
-              df.callback(0)
-        self.send_igmp_join(groups)
-        self.test_timer = reactor.callLater(0,igmp_query_timeout)
-	assert_equal(self.success, False)
-        return df
-
-    @deferred(50)
-    def test_igmpproxy_two_joins_one_leave_from_same_subscriber_and_verify_traffic(self,iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-                for group in groups:
-                    igmpState = IGMPProxyTestState(groups = [group], df = df)
-                    IGMPProxyTestState(groups = [group], df = df)
-                    mcastTraffic = McastTraffic([group], iface= tx_intf, cb = self.send_mcast_cb,
-                                   arg = igmpState)
-                    mcastTraffic.start()
-                    time.sleep(1)
-                    join_state = IGMPProxyTestState(groups = [group])
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-                    status = self.igmp_recv_task(iface, [group], join_state)
-                    if group is groups[1]:
-                        log_test.info('sending leave for group %s'%group)
-                        self.send_igmp_leave([group], delay = 11, iface = iface)
-                        join_state = IGMPProxyTestState(groups = [group])
-                        status = self.igmp_not_recv_task(rx_intf, [group], join_state)
-                        log_test.info('verified status for igmp recv task %s'%status)
-                        assert status == 1 , 'EXPECTED RESULT'
-                        log_test.info('verifying subscriber receives igmp traffic for group %s'%groups[0])
-			join_state = IGMPProxyTestState(groups = [groups[0]])
-			status = self.igmp_recv_task(iface, [groups[0]], join_state)
-		mcastTraffic.stop()
-            except:
-                log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    #fail
-    @deferred(50)
-    def test_igmpproxy_two_subscribers_joins_igmp_group_one_subscriber_goes_down_and_verify_traffic(self,iface1='veth0',iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-                igmpState = IGMPProxyTestState(groups = group, df = df)
-                IGMPProxyTestState(groups = group, df = df)
-                mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                arg = igmpState)
-                mcastTraffic.start()
-                time.sleep(1)
-                join_state = IGMPProxyTestState(groups = group)
-		for iface in [iface1, iface2]:
-		    positive_test = True if iface is iface1 else False
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-                    status = self.igmp_recv_task(iface, group, join_state)
-                    if iface is iface2:
-                        log_test.info('bringning donw iface %s'%iface)
-                        os.system('ifconfig {} down'.format(iface))
-                        time.sleep(1)
-                        os.system('ifconfig {} up'.format(iface))
-                        time.sleep(1)
-                        status = self.igmp_not_recv_task(iface, group, join_state)
-                        log_test.info('verified status for igmp recv task %s'%status)
-                        assert status == 1 , 'EXPECTED RESULT'
-                        log_test.info('verifying subscriber %s receives igmp traffic'%iface1)
-                        status = self.igmp_recv_task(iface1, group, join_state)
-                mcastTraffic.stop()
-            except:
-                log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    @deferred(50)
-    def test_igmpproxy_two_subscribers_join_different_igmp_groups_one_subscriber_leaves_and_verifying_traffic(self, iface1='veth0', iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(),self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-                for group in groups:
-                    igmpState = IGMPProxyTestState(groups = [group], df = df)
-                    IGMPProxyTestState(groups = [group], df = df)
-                    mcastTraffic = McastTraffic([group], iface= tx_intf, cb = self.send_mcast_cb,
-                                   arg = igmpState)
-                    mcastTraffic.start()
-                    time.sleep(1)
-                    join_state = IGMPProxyTestState(groups = [group])
-		    iface = iface1 if group is groups[0] else iface2
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                    t.start()
-                    self.send_igmp_join(groups = [group], src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              delay=1,iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-                    status = self.igmp_recv_task(iface, [group], join_state)
-                    if group is groups[1]:
-                        log_test.info('sending leave for group %s'%group)
-			time.sleep(3)
-                        t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                        t.start()
-                        self.send_igmp_leave([group], delay = 15, iface = iface)
-                        t.join()
-                        assert_equal(self.status, True)
-                        join_state = IGMPProxyTestState(groups = [group])
-                        status = self.igmp_not_recv_task(iface, [group], join_state)
-                        log_test.info('verified status for igmp recv task %s'%status)
-                        assert status == 1 , 'EXPECTED RESULT'
-                        log_test.info('verifying subscriber receives igmp traffic for group %s'%groups[0])
-                        join_state = IGMPProxyTestState(groups = [groups[0]])
-                        status = self.igmp_recv_task(iface1, [groups[0]], join_state)
-                mcastTraffic.stop()
-            except:
-                log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-	return df
-
-    @deferred(50)
-    def test_igmpproxy_with_two_subscriber_joining_same_igmp_group_one_subscriber_doing_fast_leave_verifying_traffic(self, iface1='veth0', iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load(FastLeave='true')
-            self.onos_ssm_table_load(group,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-                for iface in [iface1, iface2]:
-                    igmpState = IGMPProxyTestState(groups = group, df = df)
-                    IGMPProxyTestState(groups = group, df = df)
-                    mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                   arg = igmpState)
-                    mcastTraffic.start()
-                    time.sleep(1)
-                    join_state = IGMPProxyTestState(groups = group)
-		    positive_test = True if iface is iface1 else False
-                    t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface, kwargs = {'positive_test':positive_test})
-                    t.start()
-                    self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              delay=1,iface = iface)
-                    t.join()
-                    assert_equal(self.status, True)
-                    status = self.igmp_recv_task(iface, group, join_state)
-                    if iface is iface2:
-                        log_test.info('sending leave for group %s'%group)
-                        time.sleep(10)
-                        self.send_igmp_leave(group, delay = 1, iface = iface)
-                        join_state = IGMPProxyTestState(groups = group)
-                        status = self.igmp_not_recv_task(iface, group, join_state)
-                        log_test.info('verified status for igmp recv task %s'%status)
-                        assert status == 1 , 'EXPECTED RESULT'
-                        log_test.info('verifying subscriber receives igmp traffic for group %s'%group)
-                        join_state = IGMPProxyTestState(groups = group)
-                        status = self.igmp_recv_task(iface1, group, join_state)
-                mcastTraffic.stop()
-            except:
-                log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    #fail
-    @deferred(20)
-    def test_igmpproxy_with_multicast_source_connected_on_proxy_interface(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            igmpState = IGMPProxyTestState(groups = group, df = df)
-            IGMPProxyTestState(groups = group, df = df)
-            mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                  arg = igmpState)
-            mcastTraffic.start()
-            time.sleep(1)
-            join_state = IGMPProxyTestState(groups = group)
-	    try:
-                t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                t.start()
-                self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                              delay=1,iface = iface)
-                t.join()
-                assert_equal(self.status, True)
-                status = self.igmp_recv_task(iface, group, join_state)
-                mcastTraffic.stop()
-            except:
-                log_test.info('Got some unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
-
-    #fail
-    @deferred(20)
-    def test_igmpproxy_which_drops_multicast_traffic_for_exclude_record_type_group(self, iface='veth0'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            group = [self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(group,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-               igmpState = IGMPProxyTestState(groups = group, df = df)
-               IGMPProxyTestState(groups = group, df = df)
-               mcastTraffic = McastTraffic(group, iface= tx_intf, cb = self.send_mcast_cb,
-                                 arg = igmpState)
-               mcastTraffic.start()
-               time.sleep(1)
-               join_state = IGMPProxyTestState(groups = group)
-               t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-               t.start()
-               self.send_igmp_join(groups = group, src_list = src,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
-                              iface = iface)
-               t.join()
-               assert_equal(self.status, True)
-               status = self.igmp_not_recv_task(iface, group, join_state)
-               log_test.info('verified status for igmp recv task %s'%status)
-               assert status == 1 , 'EXPECTED RESULT'
-            except:
-               log_test.info('Got some unexpected error %s'%error)
-               raise
-	    mcastTraffic.stop()
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-	return df
-
-    #fail : exclude record type igmp join not forwarded to proxy interface
-    @deferred(40)
-    def test_igmpproxy_with_two_subscriber_joins_set_with_include_and_exclude_mode_record_types_verifying_traffic(self, iface1='veth0', iface2='veth4'):
-        df = defer.Deferred()
-        def igmp_proxy_test(df):
-            groups = [self.random_mcast_ip(), self.random_mcast_ip()]
-            src = [self.randomsourceip()]
-            self.onos_igmp_proxy_config_load()
-            self.onos_ssm_table_load(groups,src_list=src)
-            tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-            try:
-	       for group in groups:
-	           iface = iface1 if group is groups[0] else iface2
-		   r_type = IGMP_V3_GR_TYPE_INCLUDE if group is groups[0] else IGMP_V3_GR_TYPE_EXCLUDE
-                   igmpState = IGMPProxyTestState(groups = [group], df = df)
-                   IGMPProxyTestState(groups = [group], df = df)
-                   mcastTraffic = McastTraffic([group], iface= tx_intf, cb = self.send_mcast_cb,
-                                 arg = igmpState)
-                   mcastTraffic.start()
-                   time.sleep(1)
-                   join_state = IGMPProxyTestState(groups = [group])
-                   t = threading.Thread(target = self.verify_igmp_packets_on_proxy_interface)
-                   t.start()
-                   self.send_igmp_join(groups = [group], src_list = src,record_type = r_type,
-                              delay=1,iface = iface)
-                   t.join()
-                   assert_equal(self.status, True)
-		   if group is groups[0]:
-		       status = self.igmp_recv_task(iface, [group], join_state)
-		   else:
-                       status = self.igmp_not_recv_task(iface, [group], join_state)
-                       log_test.info('verified status for igmp recv task %s'%status)
-                       assert status == 1 , 'EXPECTED RESULT'
-            except:
-               log_test.info('Got some unexpected error %s'%error)
-               raise
-            mcastTraffic.stop()
-            df.callback(0)
-        reactor.callLater(0, igmp_proxy_test, df)
-        return df
diff --git a/src/test/igmpproxy/igmpproxyconfig.json b/src/test/igmpproxy/igmpproxyconfig.json
deleted file mode 100644
index 77cbae1..0000000
--- a/src/test/igmpproxy/igmpproxyconfig.json
+++ /dev/null
@@ -1,52 +0,0 @@
-{
-	"ports": {},
-	"hosts": {},
-	"devices": {
-		"of:0000000000000001": {
-			"basic": {
-				"driver": "default"
-			},
-			"accessDevice": {
-				"uplink": "2",
-				"vlan": "222",
-				"defaultVlan": "1"
-			}
-		}
-	},
-	"apps": {
-		"org.onosproject.provider.lldp": {
-			"suppression": {
-				"deviceTypes": ["ROADM"],
-				"annotation": "{\"no-lldp\":null}"
-			}
-		},
-		"org.opencord.igmpproxy": {
-			"igmpproxy": {
-				"globalConnectPointMode": "true",
-				"globalConnectPoint": "of:0000000000000001/1",
-				"UnsolicitedTimeOut": "2",
-				"MaxResp": "10",
-				"KeepAliveInterval": "120",
-				"KeepAliveCount": "3",
-				"LastQueryInterval": "2",
-				"LastQueryCount": "2",
-				"FastLeave": "false",
-				"PeriodicQuery": "true",
-				"IgmpCos": "7",
-				"withRAUpLink": "true",
-				"withRADownLink": "true"
-			},
-			"ssmTranslate": [{
-				"source": "10.34.56.78",
-				"group": "224.2.3.4"
-			}]
-		},
-		"org.opencord.mcast": {
-			"multicast": {
-				"ingressVlan": "222",
-				"egressVlan": "17"
-			}
-		}
-	},
-	"links": {}
-}
diff --git a/src/test/iperf/__init__.py b/src/test/iperf/__init__.py
deleted file mode 100644
index f2b8b2d..0000000
--- a/src/test/iperf/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-cli_dir = os.path.join(working_dir, '../cli')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/iperf/iperfTest.py b/src/test/iperf/iperfTest.py
deleted file mode 100644
index bc91cea..0000000
--- a/src/test/iperf/iperfTest.py
+++ /dev/null
@@ -1,219 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import time
-import os
-import subprocess
-from nose.tools import *
-from onosclidriver import OnosCliDriver
-from CordContainer import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from OnosCtrl import OnosCtrl
-from CordTestUtils import log_test as log
-
-log.setLevel('INFO')
-
-class iperf_exchange(unittest.TestCase):
-
-    switch_script = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup', 'of-bridge.sh')
-    switch = 'br-int'
-    ctlr_ip = os.getenv('ONOS_CONTROLLER_IP', 'localhost')
-    ctlr_port = '6653'
-    IPERF_TIMEOUT = 360
-    app = 'org.onosproject.dhcp'
-
-    @classmethod
-    def setUpClass(cls):
-        #cls.stop_switch()
-        #cls.install_app()
-        cmd = "apt-get install iperf"
-        os.system(cmd)
-        time.sleep(40)
-
-    @classmethod
-    def tearDownClass(cls):pass
-        #cls.onos_ctrl.deactivate()
-
-    @classmethod
-    def install_app(cls):
-        OnosCtrl.uninstall_app(cls.igmp_app)
-        time.sleep(2)
-        OnosCtrl.install_app(cls.igmp_app_file)
-        time.sleep(3)
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def iperf_tool_cmd_execution(cls,cmd = " "):
-        log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-
-        try:
-#           status = os.system(cmd)
-            status = subprocess.Popen(cmd, shell=True)
-            time.sleep(90)
-            pid = status.pid
-            log.info('Subprocess status = {}'.format(status))
-            log.info('Subprocess task id on host = {}'.format(pid))
-            status.terminate()
-        except Exception:
-            status.terminate()
-            main.log.exception( self.name + ": Uncaught exception!" )
-            main.cleanup()
-            main.exit()
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            os.system(cmd)
-            self.onos_ctrl = OnosCtrl(self.app)
-            status, _ = self.onos_ctrl.activate()
-            assert_equal(status, True)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_udp_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -u -t 20 -P 1 -i 1'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            self.onos_ctrl = OnosCtrl(self.app)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_window_of_40k_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -w 40k'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_window_of_120k_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -w 120k'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_window_of_520k_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -w 520k'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_multiple_tcp_sessions_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 5 -P 2 -i 1'
-            self.iperf_tool_cmd_execution(cmd = cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_multiple_udp_sessions_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -u -t 5 -P 2 -i 1'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_mss_with_90Bytes_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -m -M 90'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_mss_with_1490Bytes_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -m -M 1490'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
-
-    @deferred(IPERF_TIMEOUT)
-    def test_tcp_mss_with_9000Bytes_for_max_throughput_using_iperf(self):
-        df = defer.Deferred()
-        def iperf_network_test(df):
-            cmd = 'iperf -c 172.17.0.2 -p 6653 -t 20 -P 1 -i 1 -m -M 9000'
-            log.info('Test Controller by executing a iperf tool command on host = {}'.format(cmd))
-            status = os.system(cmd)
-            df.callback(0)
-        reactor.callLater(0, iperf_network_test, df)
-        return df
diff --git a/src/test/ipv6vrouter/__init__.py b/src/test/ipv6vrouter/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/ipv6vrouter/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/ipv6vrouter/ipv6vrouterTest.py b/src/test/ipv6vrouter/ipv6vrouterTest.py
deleted file mode 100644
index 2649a47..0000000
--- a/src/test/ipv6vrouter/ipv6vrouterTest.py
+++ /dev/null
@@ -1,792 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from scapy.all import *
-from CordTestUtils import get_mac, log_test
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnosFlowCtrl import OnosFlowCtrl
-from onosclidriver import OnosCliDriver
-#from quaggaclidriver import QuaggaCliDriver
-from CordContainer import Container, Onos, Quagga
-from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_quagga_stop, cord_test_quagga_shell
-from portmaps import g_subscriber_port_map
-from CordLogger import CordLogger
-import threading
-import time
-import os
-import json
-import pexpect
-import random
-from netaddr.ip import IPNetwork, IPAddress
-
-#from cli import quagga
-#from quagga import *
-#from cli import requires
-#from cli import system
-#from generic import *
-
-log_test.setLevel('INFO')
-
-class ipv6vrouter_exchange(CordLogger):
-
-    apps = ('org.onosproject.vrouter', 'org.onosproject.fwd')
-    device_id = 'of:' + get_mac()
-    vrouter_device_dict = { "devices" : {
-                "{}".format(device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-    zebra_conf = '''
-password zebra
-log stdout
-service advanced-vty
-!
-!debug zebra rib
-!debug zebra kernel
-!debug zebra fpm
-!
-interface eth1
- ipv6 address 2001::10/32
-line vty
- exec-timeout 0 0
-'''
-
-#! ip address 10.10.0.3/16
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    quagga_config_path = os.path.join(test_path, '..', 'setup/quagga-config')
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '1000:10:0:0:0:0:0:164'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    MAX_PORTS = 100
-    peer_list = [ ('2001:0:10:0:0:0:10:1', '00:00:00:00:00:01'), ('2001:0:20:0:0:0:20:1', '00:00:00:00:02:01'), ]
-    network_list = []
-    network_mask = 64
-    default_routes_address = ('1001:0:10:0::/32',)
-    default_peer_address = peer_list
-    quagga_ip = os.getenv('QUAGGA_IP')
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the vrouter apps'''
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        cls.load_device_id()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the vrouter apps'''
-        cls.vrouter_host_unload()
-        cls.start_onos(network_cfg = {})
-
-    @classmethod
-    def load_device_id(cls):
-        did = OnosCtrl.get_device_id()
-        cls.device_id = did
-        cls.vrouter_device_dict = { "devices" : {
-                "{}".format(did) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-            },
-        }
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def onos_load_config(cls, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    @classmethod
-    def vrouter_config_get(cls, networks = 4, peers = 1, peer_address = None,
-                           route_update = None, router_address = None, specific_peer = None):
-        vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers,specific_peer = specific_peer,
-                                                    peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    @classmethod
-    def host_config_load(cls, host_config = None):
-        for host in host_config:
-            status, code = OnosCtrl.host_config(host)
-            if status is False:
-                log_test.info('JSON request returned status %d' %code)
-                assert_equal(status, True)
-
-    @classmethod
-    def generate_host_config(cls,hosts_list=None):
-        num = 1
-        hosts_dict = {}
-	if hosts_list is not None:
-	    hosts = hosts_list
-	else:
-	    hosts = cls.peer_list
-        for host, mac in hosts:
-            port = num  if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': port}}
-            num += 1
-            return hosts_dict.values()
-
-
-    @classmethod
-    def vrouter_host_load(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            log_test.info('Assigning ip %s to interface %s' %(host, iface))
-            config_cmds = ( 'ifconfig {} 0'.format(iface),
-                            'ifconfig {0} inet6 add {1}/64'.format(iface, host),
-                            'arping -I {0} {1} -c 2'.format(iface, host),
-                            )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def vrouter_host_unload(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            config_cmds = ('ifconfig {} 0'.format(iface), )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def start_onos(cls, network_cfg = None):
-        if type(network_cfg) is tuple:
-            res = []
-            for v in network_cfg:
-                res += v.items()
-            config = dict(res)
-        else:
-            config = network_cfg
-        log_test.info('Restarting ONOS with new network configuration %s'%config)
-        return cord_test_onos_restart(config = config)
-
-    @classmethod
-    def randomipv6(cls, subnet='2001::', prefix=64):
-	random.seed()
- 	ipv6_address = IPAddress(subnet) + random.getrandbits(16)
-	ipv6_network = IPNetwork(ipv6_address)
-	ipv6_network.prefixlen = prefix
-	output =  '{},{}'.format(ipv6_address,ipv6_network)
-	return '{}'.format(ipv6_address),'{}'.format(ipv6_network)
-
-    @classmethod
-    def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
-	log_test.info('Peer address in quagga start is %s'%peer_address)
-        log_test.info('Restarting Quagga container with configuration for %d networks' %(networks))
-        config = cls.generate_conf(networks = networks, peer_address = peer_address, router_address = router_address)
-        if networks <= 10000:
-            boot_delay = 25
-        else:
-            delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
-            n = min(networks/100000, len(delay_map)-1)
-            boot_delay = delay_map[n]
-        cord_test_quagga_restart(config = config, boot_delay = boot_delay)
-
-    @classmethod
-    def generate_vrouter_conf(cls, networks = 4, peers = 1, peer_address = None, specific_peer = None,router_address = None):
-	num = 0
-	count = 0
-	if peer_address is None:
-	    start_peer =   ( 10 << 16 )
-            end_peer =     ( 9999 << 16 )
-	else:
-	   ip = peer_address[0][0]
-           start_ip = ip.split(':')
-           start_peer =  ( int(start_ip[6]) << 16)
-           end_peer =   ( 9999 << 16 )
-	local_network = end_peer + 1
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        peer_list = []
-	for n in xrange(start_peer, end_peer, 65536):
-	    port_map = ports_dict['ports']
-            port = num+1  if count < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-	    try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-	    if specific_peer is None:
-                peer_ip = '2001:0:0:0:0:0:' + '%s:1'%( ( n >> 16 ) & 0xffff )
-	    else:
-		start_ip[6] = '%s'%( ( n >> 16 ) & 0xffff )
-		start_ip[-1] = '1'
-		peer_ip = ':'.join(start_ip)
-	    peer_nt = peer_ip + '/112'
-	    mac = RandMAC()._fix()
-	    peer_list.append((peer_ip, mac))
-	    log_test.info('peer ip is %s and and peer network is %s'%(peer_ip,peer_nt))
-	    if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [peer_nt], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(peer_nt)
-            num += 1
-	    if num == peers:
-		break
-        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
-        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
-        quagga_router_dict['ospfEnabled'] = True
-        quagga_router_dict['interfaces'] = interface_list
-        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-
-        #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
-        bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
-        speaker_dict = {}
-        speaker_dict['name'] = 'bgp{}'.format(peers+1)
-        speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-        speaker_dict['peers'] = peer_list
-        bgp_speakers_list.append(speaker_dict)
-        cls.peer_list = peer_list
-        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
-
-
-    @classmethod
-    def generate_conf(cls, networks = 4, peer_address = None, router_address = None):
-        num = 0
-        if router_address is None:
-            start_network =   ( 10 << 16 )
-            end_network =     ( 9999 << 16 )
-            network_mask = 112
-        else:
-           ip = router_address
-           start_ip = ip.split(':')
-           network_mask = int(start_ip[7].split('/')[1])
-           start_network = (int(start_ip[6]) << 16)
-           end_network = (9999 << 16)
-        net_list = []
-        peer_list = peer_address if peer_address is not None else cls.peer_list
-        network_list = []
-        for n in xrange(start_network, end_network, 65536):
-	    if router_address is None:
-                net = '3001:0:0:0:0:0:' + '%s:0'%( ( n >> 16 ) & 0xffff )
-	    else:
-		start_ip[6] = '%s'%( ( n >> 16 ) & 0xffff )
-		net = ':'.join((start_ip[0],start_ip[1],start_ip[2],start_ip[3],start_ip[4],start_ip[5],start_ip[6],start_ip[7][0]))
-            network_list.append(net)
-            gateway = peer_list[num % len(peer_list)][0]
-            net_route = 'ipv6 route {0}/{1} {2}'.format(net, network_mask, gateway)
-            net_list.append(net_route)
-            num += 1
-            if num == networks:
-                break
-        cls.network_list = network_list
-        cls.network_mask = network_mask
-        zebra_routes = '\n'.join(net_list)
-        return cls.zebra_conf + zebra_routes
-
-    @classmethod
-    def vrouter_activate(cls, deactivate = False):
-        app = 'org.onosproject.vrouter'
-        onos_ctrl = OnosCtrl(app)
-        if deactivate is True:
-            onos_ctrl.deactivate()
-        else:
-            onos_ctrl.activate()
-        time.sleep(3)
-
-    @classmethod
-    def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,specific_peer = None,
-                          route_update = None, router_address = None, time_expire = None, adding_new_routes = None):
-        vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,specific_peer = specific_peer,
-                                                 peer_address = peer_address, route_update = route_update)
-        cls.start_onos(network_cfg = vrouter_configs)
-	hostcfg = cls.generate_host_config()
-        cls.host_config_load(host_config = hostcfg)
-        ##Start quagga
-        cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1000:11:12:13:14:15:16:17'
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
-                  prn = recv_cb, iface = self.port_map[ingress])
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = src_mac, dst = dst_mac)
-        L3 = IPv6(src=src_ip,dst = dst_ip)
-        pkt = L2/L3
-        log_test.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
-                 (dst_ip, dst_mac, self.port_map[egress]))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = self.port_map[egress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def vrouter_traffic_verify(self, positive_test = True, peer_address = None):
-        if peer_address is None:
-            peers = len(self.peer_list)
-            peer_list = self.peer_list
-        else:
-            peers = len(peer_address)
-            peer_list = peer_address
-        egress = peers + 1
-        num = 0
-        num_hosts = 5 if positive_test else 1
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1000:11:12:13:14:15:16:17'
-	last_bytes = [1234,8364,7360,'0af3','fdac']
-        for network in self.network_list:
-            num_ips = num_hosts
-            octets = network.split(':')
-            for  byte in last_bytes:
-                octets[-1] = str(byte)
-                dst_ip = ':'.join(octets)
-                dst_mac = peer_list[ num % peers ] [1]
-                port = (num % peers)
-                ingress = port + 1
-                #Since peers are on the same network
-                ##Verify if flows are setup by sending traffic across
-                self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
-            num += 1
-
-    def __vrouter_network_verify(self, networks, peers = 1, positive_test = True,
-                                 start_network = None, start_peer_address = None, route_update = None,
-                                 invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
-                                 deactivate_activate_vrouter = None, adding_new_routes = None,
-				 specific_peer = None):
-
-        _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
-                                                          peer_address = start_peer_address,
-                                                          route_update = route_update,
-                                                          router_address = start_network,
-                                                          time_expire = time_expire,
-                                                          adding_new_routes = adding_new_routes,
-							  specific_peer = specific_peer)
-	if self.network_list > 50:
-		wait = len(self.network_list)/20
-		time.sleep(wait)
-		log_test.info('waiting for %d seconds to verify routes in ONOS'%wait)
-	else:
-		time.sleep(5)
-	self.cliEnter()
-	routes = json.loads(self.cli.routes(jsonFormat = True))
-	assert_equal(len(routes['routes6']), networks)
-	if invalid_peers is None:
-            self.vrouter_traffic_verify()
-	if time_expire is True:
-            self.start_quagga(networks = networks, peer_address = start_peer_address, router_address = '12.10.10.1/24')
-            self.vrouter_traffic_verify()
-	if unreachable_route_traffic is True:
-            network_list_backup = self.network_list
-            self.network_list = ['1:1:1:1:1:1:1:1','2:2:2:2:2:2:2:2','3:3:3:3:3:3:3:3','4:4:4:4:4:4:4:4']
-            self.vrouter_traffic_verify(positive_test = False)
-            self.network_list = network_list_backup
-	if deactivate_activate_vrouter is True:
-            log_test.info('Deactivating vrouter app in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = True)
-            #routes = json.loads(self.cli.routes(jsonFormat = False, cmd_exist = False))
-            #assert_equal(len(routes['routes4']), 'Command not found')
-            log_test.info('Activating vrouter app again in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = False)
-	    if self.network_list > 50:
-                wait = len(self.network_list)/20
-                time.sleep(wait)
-                log_test.info('waiting for %d seconds to verify routes in ONOS'%wait)
-            else:
-                time.sleep(5)
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            assert_equal(len(routes['routes4']), networks)
-            self.vrouter_traffic_verify()
-	self.cliExit()
-        return True
-
-    def __vrouter_network_verify_negative(self, networks, peers = 1):
-        ##Stop quagga. Test traffic again to see if flows were removed
-        log_test.info('Stopping Quagga container')
-        cord_test_quagga_stop()
-        self.vrouter_traffic_verify(positive_test = False)
-        log_test.info('OVS flows have been removed successfully after Quagga was stopped')
-        self.start_quagga(networks = networks)
-        self.vrouter_traffic_verify()
-        log_test.info('OVS flows have been successfully reinstalled after Quagga was restarted')
-
-    def quagga_shell(self, cmd):
-        shell_cmds = ('vtysh', '"conf t"', '"{}"'.format(cmd))
-        quagga_cmd = ' -c '.join(shell_cmds)
-        return cord_test_quagga_shell(quagga_cmd)
-
-    def test_vrouter_ipv6_with_5_routes(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_5_routes_quagga_restart_without_config(self):
-	res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('Restart Quagga container without config retain')
-        cord_test_quagga_restart()
-        self.vrouter_traffic_verify(positive_test = False)
-
-    def test_vrouter_ipv6_with_5_routes_quagga_restart_with_config(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after Quagga restart with config retain')
-        #cord_test_quagga_restart()
-        self.start_quagga(networks=5)
-        self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_ipv6_with_5_routes_quagga_stop(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after Quagga stop')
-        cord_test_quagga_stop()
-        self.vrouter_traffic_verify(positive_test = False)
-
-    def test_vrouter_ipv6_with_5_routes_quagga_stop_and_start(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after Quagga stop and start again')
-        cord_test_quagga_stop()
-        self.vrouter_traffic_verify(positive_test = False)
-	self.start_quagga(networks=5)
-	self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_ipv6_with_5_routes_onos_restart_without_config(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after ONOS restart without config retain')
-	cord_test_onos_restart()
-        self.vrouter_traffic_verify(positive_test = False)
-
-    def test_vrouter_ipv6_with_5_routes_onos_restart_with_config(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after ONOS restart with config retain')
-	vrouter_configs = self.vrouter_config_get(networks = 5, peers = 1,
-                                                 peer_address = None, route_update = None)
-        self.start_onos(network_cfg=vrouter_configs)
-	mac = RandMAC()._fix()
-	hostcfg = self.generate_host_config(hosts_list = [('2001:0:0:0:0:0:10:1',mac)])
-        self.host_config_load(host_config = hostcfg)
-	time.sleep(10)
-        self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_ipv6_with_5_routes_restart_quagga_and_onos_with_config(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-        log_test.info('verifying vrouter traffic after Quagga and ONOS restart with config retain')
-	#cord_test_quagga_restart()
-	self.start_quagga(networks=5)
-        vrouter_configs = self.vrouter_config_get(networks = 5, peers = 1,
-                                                 peer_address = None, route_update = None)
-        self.start_onos(network_cfg = vrouter_configs)
-        mac = RandMAC()._fix()
-	hostcfg = self.generate_host_config(hosts_list = [('2001:0:0:0:0:0:10:1',mac)])
-        self.host_config_load(host_config = hostcfg)
-        time.sleep(10)
-        self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_ipv6_with_5_routes_2_peers(self):
-        res = self.__vrouter_network_verify(5, peers = 2)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_6_routes_3_peers(self):
-        res = self.__vrouter_network_verify(6, peers = 3)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_50_routes(self):
-        res = self.__vrouter_network_verify(50, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_50_routes_5_peers(self):
-        res = self.__vrouter_network_verify(50, peers = 5)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_100_routes(self):
-        res = self.__vrouter_network_verify(100, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_100_routes_10_peers(self):
-        res = self.__vrouter_network_verify(100, peers = 10)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_300_routes(self):
-        res = self.__vrouter_network_verify(300, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_1k_routes(self):
-        res = self.__vrouter_network_verify(1000, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_9k_routes(self):
-        res = self.__vrouter_network_verify(9000, peers = 1)
-        assert_equal(res, True)
-
-    @nottest # Need to implement logic for generating more than 10000 routes
-    def test_vrouter_ipv6_with_100000_routes(self):
-        res = self.__vrouter_network_verify(100000, peers = 1)
-        assert_equal(res, True)
-
-    @nottest # Need to implement logic for generating more than 10000 routes
-    def test_vrouter_ipv6_with_1000000_routes(self):
-        res = self.__vrouter_network_verify(1000000, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_route_update(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        assert_equal(res, True)
-        peer_info = [('2001:0:0:0:0:0:72:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:73:1', '00:00:00:00:02:01')]
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, route_update = True)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_64bit_mask_route_update(self):
-        router_address = '3001:0:0:0:0:0:56:0/64'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_32bit_route_update(self):
-        router_address = '3112:90c4:836a:7e56:0:0:06:0/32'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_16bit_route_update(self):
-        router_address = '9961:9474:0:8472:f30a:0:06:0/16'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True,start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_48bit_route_update(self):
-        router_address = 'c34a:9737:14cd:8730:0:0:06:0/48'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_classless_route_update(self):
-        router_address = '3001:430d:76cb:f56e:873:0:677:0/67'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_classless_duplicate_route_update(self):
-        router_address = '3001:8730:732:723:0:0:677:0/116'
-        res = self.__vrouter_network_verify(5, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_invalid_peers(self):
-        peer_info = [('FE80:0:0:0:C800:27FF:10:8', '00:00:00:00:01:01'), ('FE80:0:0:0:C800:27FF:11:8', '00:00:00:00:02:01')]
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, specific_peer=True,invalid_peers= True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_traffic_sent_between_peers_connected_to_onos(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, traffic_running_between_peers = True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_routes_time_expire(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, time_expire = True)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_unreachable_route(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, unreachable_route_traffic = True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_ipv6_with_enabling_disabling_vrouter_app(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, deactivate_activate_vrouter = True)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_adding_new_routes_in_routing_table(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        cmd = 'ipv6 route 4001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:10:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify()
-        self.network_list = [ '4001:0:0:0:0:0:677:0' ]
-        self.network_mask = 64
-        self.vrouter_traffic_verify()
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_adding_new_routes_in_quagga_routing_table_and_restart(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        cmd = 'ipv6 route 4001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:10:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify()
-        self.network_list = [ '4001:0:0:0:0:0:677:0' ]
-        self.network_mask = 64
-        self.vrouter_traffic_verify()
-	log_test.info('verifying vrouter traffic for added  routes after Quagga restart with old config only retain')
-        #cord_test_quagga_restart()
-        self.start_quagga(networks=5)
-        self.vrouter_traffic_verify(positive_test = False)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_with_removing_old_routes_in_routing_table(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        cmd = 'ipv6 route 4001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:10:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify()
-        old_network_list = self.network_list
-        old_network_mask = self.network_mask
-        self.network_list = [ '4001:0:0:0:0:0:677:0' ]
-        self.network_mask = 64
-        self.vrouter_traffic_verify()
-        assert_equal(res, True)
-        cmd = 'no ipv6 route 4001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:10:1'
-        self.quagga_shell(cmd)
-        time.sleep(5)
-        self.vrouter_traffic_verify(positive_test = False)
-        self.network_mask = old_network_mask
-        self.network_list = old_network_list
-        self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_ipv6_modifying_nexthop_route_in_routing_table(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/112'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:677:0/112 2001:0:0:0:0:0:18:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-
-    def test_vrouter_ipv6_deleting_alternative_nexthop_in_routing_table(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/112'
-        res = self.__vrouter_network_verify(1, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:677:0/112 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        time.sleep(5)
-        self.vrouter_traffic_verify(positive_test = False)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_deleting_some_routes_in_routing_table(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/112'
-        res = self.__vrouter_network_verify(10, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:677:0/112 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:678:0/112 2001:0:0:0:0:0:13:1'
-        self.quagga_shell(cmd)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:679:0/112 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_deleting_some_routes_in_quagga_routing_table_and_restart(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/112'
-        res = self.__vrouter_network_verify(10, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:677:0/112 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:678:0/112 2001:0:0:0:0:0:13:1'
-        self.quagga_shell(cmd)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:679:0/112 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-	self.network_list = [ '3001:0:0:0:0:0:677:0','3001:0:0:0:0:0:678:0','3001:0:0:0:0:0:679:0' ]
-	self.network_mask = 112
-        self.vrouter_traffic_verify(positive_test = False)
-	self.network_list = [ '3001:0:0:0:0:0:680:0','3001:0:0:0:0:0:681:0' ]
-        self.vrouter_traffic_verify(positive_test = True)
-	#cord_test_quagga_restart()
-        self.start_quagga(networks=10)
-	self.network_list = [ '3001:0:0:0:0:0:677:0','3001:0:0:0:0:0:681:0' ]
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-
-    def test_vrouter_ipv6_deleting_and_adding_routes_in_routing_table(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/64'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ipv6 route 3001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        cmd = 'ipv6 route 3001:0:0:0:0:0:677:0/64 2001:0:0:0:0:0:12:1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-    def test_vrouter_ipv6_toggling_nexthop_interface(self):
-        peer_info = [('2001:0:0:0:0:0:12:1', '00:00:00:00:01:01'), ('2001:0:0:0:0:0:13:1', '00:00:00:00:02:01')]
-        router_address = '3001:0:0:0:0:0:677:0/64'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_peer_address = peer_info, start_network  = router_address)
-        iface = self.port_map[1]
-        #toggle the interface to trigger host removal.
-        cmds = ('ifconfig {} down'.format(iface),
-                'sleep 2',
-                'ifconfig {} 0'.format(iface),)
-        for cmd in cmds:
-            os.system(cmd)
-        self.vrouter_traffic_verify(positive_test = False)
-        host = "2001:0:0:0:0:0:12:1"
-        cmd = 'ifconfig {0} {1} up'.format(iface, host)
-        os.system(cmd)
-        #wait for arp refresh
-        time.sleep(60)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
diff --git a/src/test/md5/__init__.py b/src/test/md5/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/md5/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/md5/md5AuthTest.py b/src/test/md5/md5AuthTest.py
deleted file mode 100644
index 81abd14..0000000
--- a/src/test/md5/md5AuthTest.py
+++ /dev/null
@@ -1,51 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import os,sys
-from EapMD5 import MD5AuthTest
-
-class eap_auth_exchange(unittest.TestCase):
-      def test_eap_md5(self):
-          t = MD5AuthTest()
-          t.runTest()
-      def test_eap_md5_wrg_password(self):
-          t =  MD5AuthTest()
-          t._wrong_password()
-          t.runTest()
-
-if __name__ == '__main__':
-          t =  MD5AuthTest()
-          t.runTest()
-          ####### Start the EAP-MD5 Negative testcase 
-          t._wrong_password()
-          t.runTest()
-
diff --git a/src/test/mini/__init__.py b/src/test/mini/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/mini/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/mini/miniTest.py b/src/test/mini/miniTest.py
deleted file mode 100644
index 8d17baa..0000000
--- a/src/test/mini/miniTest.py
+++ /dev/null
@@ -1,749 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from OnosCtrl import OnosCtrl
-from OnosFlowCtrl import OnosFlowCtrl
-from OltConfig import OltConfig
-from onosclidriver import OnosCliDriver
-from functools import partial
-#from CordContainer import Onos
-utils_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../utils')
-sys.path.append(utils_dir)
-sys.path.insert(1, '/usr/local/lib/python2.7/dist-packages/requests')
-import time, monotonic
-from CordContainer import Onos
-from OnosLog import OnosLog
-from CordLogger import CordLogger
-from CordTestUtils import log_test as log
-import os
-import json
-import random
-import collections
-from mininet.net import Mininet
-from mininet.topo import SingleSwitchTopo,LinearTopo,Topo
-from mininet.topolib import TreeTopo
-#from mininet.clean import Cleanup
-from mininet.node import Controller, RemoteController, Switch
-from mininet.cli import CLI
-from mininet.log import setLogLevel, info
-from mininet.link import TCLink
-from mininet.util import dumpNodeConnections
-from mininet.node import CPULimitedHost
-log.setLevel('INFO')
-
-class mininet_exchange(unittest.TestCase):
-    app = 'org.onosproject.fwd'
-    controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
-    controller = controller.split(',')[0]
-
-    @classmethod
-    def setUpClass(cls):
-	pass
-
-    @classmethod
-    def tearDownClass(cls):
-	pwd = os.getcwd()
-	log.info('teardown- current working dir is %s'%pwd)
-	os.chdir('/root')
-	cmds = ['rm -r mininet','apt-get remove mininet']
-	for cmd in cmds:
-            os.system(cmd)
-        os.chdir(pwd)
-        log.info('teardown- dir after removing mininet is %s'%os.getcwd())
-        time.sleep(5)
-
-    def setUp(self):
-        self.onos_ctrl = OnosCtrl(self.app)
-        self.onos_ctrl.activate()
-
-    def tearDown(self):
-        self.onos_ctrl = OnosCtrl(self.app)
-        self.onos_ctrl.deactivate()
-
-    def cliEnter(self, controller = None):
-        retries = 0
-        while retries < 30:
-            self.cli = OnosCliDriver(controller = controller, connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    def test_creation_of_topology(self):
-        try:
-            net = Mininet( topo=None, build=False)
-            h1 = net.addHost( 'h1')
-            h2 = net.addHost( 'h2' )
-            h3 = net.addHost( 'h3' )
-            s1 = net.addSwitch( 's1', dpid="0000000000000201")
-            s2 = net.addSwitch( 's2', dpid="0000000000000202")
-            s3 = net.addSwitch( 's3', dpid="0000000000000203")
-            net.addLink(h1, s1, )
-            net.addLink(h2, s2, )
-            net.addLink(h3, s3, )
-            net.addLink(s1, s2, )
-            net.addLink(s2, s3, )
-            #net.build()
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-	    s1.start( [ctrl] )
-            s2.start( [ctrl] )
-            s3.start( [ctrl] )
-	    #CLI(net)
-	    for switch in net.switches:
-		log.info('dpid of switch is %s'%switch.dpid)
-	    for host in net.hosts:
-	   	log.info('host %s added with IP addres %s'%(host.name,host.IP()))
-            net.stop()
-	    log.info('Successfully created  mininet topology and connected to cluster controllers')
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_creation_of_single_switch_topology(self,hosts=5):
-        try:
-            topo = SingleSwitchTopo(hosts)
-            net = Mininet(topo=topo )
-            net.start()
-            log.info('Node connections are %s'%dumpNodeConnections(net.hosts))
-      	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-	    for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response is %s'%response)
-            assert_equal(response,0.0)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_creation_of_linear_topology(self,switches=5):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            log.info('Node connections are %s'%dumpNodeConnections(net.hosts))
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response is %s'%response)
-            assert_equal(response,0.0)
-	    #CLI(net)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating minine topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_creation_of_tree_topology(self):
-        try:
-            topo = TreeTopo(depth=2,fanout=2)
-            net = Mininet(topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response is %s'%response)
-            assert_equal(response,0.0)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_executing_commands_from_mininet_host(self,switches=4):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            for host in net.hosts:
-                result = host.cmd('ping -c 2', net.switches[0].IP())
-                log.info('Result is %s'%result)
-                res = result.find('icmp_seq')
-                assert_not_equal(res, -1)
-            net.stop()
-        except Exception as Error:
-            Cleanup.cleanup()
-            log.info('Error while creating topology is %s'%Error)
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pingall_from_mininet(self,switches=5):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                log.info('switch is %s'%switch  )
-                switch.start([ctrl])
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            assert_equal(response,0.0)
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_initiating_pingall_from_mininet_with_onos_app_deactivation(self,switches=3):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('PingAll response before onos app \'org.onosproject.fwd\' deactivate is %s'%response)
-            assert_equal(response, 0.0)
-            OnosCtrl(self.app).deactivate()
-            response = net.pingAll()
-            log.info('PingAll response after onos app \'org.onosproject.fwd\' deactivate is %s'%response)
-            assert_equal(response, 100.0)
-	    net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-	Cleanup.cleanup()
-
-    def test_verifying_mininet_hosts_in_onos_controller(self,switches=4):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet( topo=topo)
-	    net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            log.info('mininet all the devices IDs %s'%net.keys())
-            log.info('mininet all the devices details %s'%net.values())
-            log.info('mininet all the devices information %s'%net.items())
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            assert_equal(response, 0.0)
-            self.cliEnter()
-            hosts = json.loads(self.cli.hosts(jsonFormat = True))
-            log.info('Discovered hosts: %s' %hosts)
-            assert_equal(len(hosts),switches)
-            self.cliExit()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_tcp_bandwidth_measure_between_mininet_hosts_using_iperf(self):
-        try:
-	    topo = TreeTopo(depth=2,fanout=2)
-            net = Mininet( topo=topo, host=CPULimitedHost, link=TCLink, build=False)
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('PingAll response is %s'%response)
-            bandwidth = net.iperf()
-            log.info('TCP Bandwidth between hosts measured using iperf is %s'%bandwidth)
-            assert_equal(len(bandwidth),2)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_udp_bandwidth_measure_between_mininet_hosts_using_iperf(self):
-        try:
-            topo = TreeTopo(depth=2,fanout=2)
-            net = Mininet( topo=topo, host=CPULimitedHost, link=TCLink, build=False)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            bandwidth = net.iperf(l4Type = 'UDP')
-            log.info('UDP Bandwidth between hosts measured using iperf is %s'%bandwidth)
-            assert_equal(len(bandwidth),3)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_tcp_bandwidth_between_mininet_hosts_using_iperf_with_one_host_removed(self,switches=3):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            iperf = net.iperf(l4Type='TCP')
-            log.info('Iperf response before host removed is %s'%iperf)
-            assert_equal(len(iperf),2)
-	    net.delNode(net.hosts[2])
-            iperf = net.iperf(l4Type='TCP')
-            log.info('Iperf response after host removed is %s'%iperf)
-	    assert_equal(len(iperf),2)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_udp_bandwidth_between_mininet_hosts_using_iperf_with_one_host_removed(self,switches=3):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            iperf = net.iperf(l4Type='UDP')
-            log.info('Iperf response before host removed is %s'%iperf)
-            assert_equal(len(iperf),3)
-            net.delNode(net.hosts[2])
-            iperf = net.iperf(l4Type='UDP')
-            log.info('Iperf response after host removed is %s'%iperf)
-	    assert_equal(len(iperf),3)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_hosts_assigned_with_non_default_ip_address(self):
-        try:
-            net = Mininet( topo=None, controller=RemoteController, host=CPULimitedHost, link=TCLink, build=False)
-            h1 = net.addHost( 'h1', ip='192.168.10.1/24' )
-            h2 = net.addHost( 'h2', ip='192.168.10.10/24' )
-            s1 = net.addSwitch( 's1')
-            s2 = net.addSwitch( 's2')
-            net.addLink(h1, s1, )
-            net.addLink(h2, s2, )
-            net.addLink(s1, s2, )
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            net.start()
-            assert_equal(net.hosts[0].IP(),'192.168.10.1')
-            assert_equal(net.hosts[1].IP(),'192.168.10.10')
-            response = net.pingAll()
-            log.info('PingAll response is %s'%response)
-            assert_equal(response,0.0)
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_hosts_assigned_with_non_default_ip_address_in_different_subnets(self):
-        try:
-            net = Mininet( topo=None, controller=RemoteController, host=CPULimitedHost, link=TCLink, build=False)
-            h1 = net.addHost( 'h1', ip='192.168.10.10/24' )
-            h2 = net.addHost( 'h2', ip='192.168.20.10/24' )
-            s1 = net.addSwitch( 's1')
-            s2 = net.addSwitch( 's2')
-            net.addLink(h1, s1, )
-            net.addLink(h2, s2, )
-            net.addLink(s1, s2, )
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            assert_equal(net.hosts[0].IP(),'192.168.10.10')
-            assert_equal(net.hosts[1].IP(),'192.168.20.10')
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            assert_equal(response,100.0)
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pingall_with_connection_remove_between_switches(self,switches=4):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-	    #net.build()
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response before link delete is %s'%response)
-            assert_equal(response,0.0)
-            log.info('Deleting link between switches s1 and s2')
-            net.delLinkBetween(net.switches[0], net.switches[1], )
-            response = net.pingAll()
-            log.info('Pingall response after the link delete is is %s'%response)
-            assert_not_equal(response,0.0)
-            net.stop()
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pingall_with_removing_one_mininet_host(self,switches=3):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-	    #net.build()
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response before host delete is %s'%response)
-            assert_equal(response,0.0)
-            log.info('removing host h2')
-            net.delNode(net.hosts[1])
-            response = net.pingAll()
-            log.info('Pingall response after host delete is %s'%response)
-            assert_equal(response,0)
-            net.stop()
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pingall_with_removing_one_mininet_switch(self,switches=3):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-	    #net.build()
-            net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response before switch delete is %s'%response)
-            assert_equal(response,0.0)
-            log.info('Deleting switch s2')
-            net.delNode(net.switches[1])
-            response = net.pingAll()
-            log.info('Pingall response after switch delete is %s'%response)
-            assert_not_equal(response,0.0)
-            net.stop()
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_mininet_switch_status_in_onos_controller(self,switches=4):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet(topo=topo, build=False)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('Pingall response is %s'%response)
-            assert_equal(response,0.0)
-            self.cliEnter()
-            devices = json.loads(self.cli.devices(jsonFormat = True))
-	    count = 0
-	    switch_ids = []
-	    for switch in net.switches:
-                dvcid = 'of:'+switch.dpid
-                switch_ids.append(dvcid)
-	    for device in devices:
-	        if str(device['id']) in switch_ids:
-	            assert_equal(str(device['available']), 'True')
-		    count += 1
-	    assert_equal(count,switches)
-            self.cliExit()
-            net.stop()
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verify_host_status_in_onos_controller_with_removing_one_mininet_host(self,switches=5):
-        try:
-	    topo = LinearTopo(switches)
-            net = Mininet( topo=topo, build=False)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('pingall response is %s'%response)
-            assert_equal(response,0.0)
-            self.cliEnter()
-            hosts = json.loads(self.cli.hosts(jsonFormat = True))
-	    log.info('Discovered Hosts are %s'%hosts)
-            assert_equal(len(hosts),switches)
-            log.info('removing host h2')
-            net.delNode(net.hosts[0])
-            hosts = json.loads(self.cli.hosts(jsonFormat = True))
-            assert_equal(len(hosts),switches-1)
-            self.cliExit()
-            net.stop()
-        except Exception as Error:
-            log.info('Got error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pushing_mac_flows_from_onos_controller_to_mininet_switches(self,switches=3):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet( topo=topo)
-            net.start()
-            egress_mac = RandMAC()._fix()
-            ingress_mac = RandMAC()._fix()
-            egress = 1
-            ingress = 2
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            self.cliEnter()
-            devices = json.loads(self.cli.devices(jsonFormat = True))
-            for switch in net.switches:
-		dvcid = 'of:'+switch.dpid
-                flow = OnosFlowCtrl(deviceId = dvcid,
-                                        egressPort = egress,
-                                        ingressPort = ingress,
-                                        ethSrc = ingress_mac,
-                                        ethDst = egress_mac)
-                result = flow.addFlow()
-                assert_equal(result, True)
-	    self.cliExit()
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pushing_ipv4_flows_from_onos_controller_to_mininet_switches(self,switches=5):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet( topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            egress = 1
-            ingress = 2
-            egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-            ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-            response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            for switch in net.switches:
-		dvcid = 'of:'+switch.dpid
-                flow = OnosFlowCtrl(deviceId = dvcid,
-                                    egressPort = egress,
-                                    ingressPort = ingress,
-                                    ethType = '0x0800',
-				    ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
-                                    ipDst = ('IPV4_DST', egress_map['ip']+'/32')
-                                    )
-                result = flow.addFlow()
-                assert_equal(result, True)
-            net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_pushing_ipv6_flows_from_onos_controller_to_mininet_switches(self,switches=5):
-	try:
-	    topo = LinearTopo(switches)
-	    net = Mininet( topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                switch.start( [ctrl] )
-            egress = 1
-            ingress = 2
-            egress_map = { 'ether': '00:00:00:00:00:03', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
-            ingress_map = { 'ether': '00:00:00:00:00:04', 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
-	    response = net.pingAll()
-            log.info('pingAll response is %s'%response)
-            for switch in net.switches:
-		dvcid = 'of:'+switch.dpid
-                flow = OnosFlowCtrl(deviceId = dvcid,
-                                    egressPort = egress,
-                                    ingressPort = ingress,
-                                    ethType = '0x86dd',
-                            	    ipSrc = ('IPV6_SRC', ingress_map['ipv6'] + '/48'),
-                            	    ipDst = ('IPV6_DST', egress_map['ipv6'] + '/48')
-                                    )
-                result = flow.addFlow()
-                assert_equal(result, True)
-            net.stop()
-   	except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_topology_created_with_50_switches_in_onos_controller(self,switches=50):
-	try:
-	    topo = LinearTopo(switches)
-	    net = Mininet(topo=topo)
-	    net.start()
-	    ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-	    for switch in net.switches:
-                switch.start([ctrl])
-	    time.sleep(5)
-	    self.cliEnter()
-            devices = json.loads(self.cli.devices(jsonFormat = True))
-	    device_list = []
-	    count = 0
-	    for device in devices:
-		device_list.append(str(device['id']))
-	    log.info('device list is %s'%device_list)
-	    for switch in net.switches:
-                switch_id = 'of:'+switch.dpid
-		if switch_id in device_list:
-		    count += 1
-	    assert_equal(count,switches)
-	    self.cliExit()
-	    net.stop()
-	except Exception as Error:
-	    log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-	Cleanup.cleanup()
-
-    def test_topology_created_with_200_switches_in_onos_controller(self,switches=200):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                log.info('switch is %s'%switch  )
-                switch.start([ctrl])
-	    time.sleep(10)
-	    self.cliEnter()
-            devices = json.loads(self.cli.devices(jsonFormat = True))
-            device_list = []
-            count = 0
-	    for device in devices:
-                device_list.append(str(device['id']))
-            log.info('device list is %s'%device_list)
-            for switch in net.switches:
-                switch_id = 'of:'+switch.dpid
-                if switch_id in device_list:
-                    count += 1
-            assert_equal(count,switches)
-	    self.cliExit()
-	    net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
-
-    def test_verifying_nodes_removed_in_mininet_status_in_onos_controller(self,switches=50, delete=20):
-        try:
-            topo = LinearTopo(switches)
-            net = Mininet(topo=topo)
-            net.start()
-            o1_ctrl = net.addController( 'onos', controller=RemoteController, ip=self.controller, port=6653)
-            for switch in net.switches:
-                log.info('switch is %s'%switch)
-                switch.start([o1_ctrl])
-	    time.sleep(5)
-	    self.cliEnter()
-            devices = json.loads(self.cli.devices(jsonFormat = True))
-            device_list = []
-            count = 0
-            for device in devices:
-                device_list.append(str(device['id']))
-            log.info('device list is %s'%device_list)
-            for switch in net.switches:
-                switch_id = 'of:'+switch.dpid
-                if switch_id in device_list:
-                    count += 1
-            assert_equal(count,switches)
-	    count = 0
-	    dltd_list = []
-	    for switch in net.switches:
-                log.info('Switch is %s'%switch)
-	        dltd_list.append('of:'+switch.dpid)
-                net.delNode(switch)
-                count += 1
-                if count == delete:
-                    break
-	    log.info('deleted switch dpid\'s %s'%dltd_list)
-	    count = 0
-	    devices = json.loads(self.cli.devices(jsonFormat = True))
-	    for device in devices:
-		if str(device['id']) in dltd_list:
-		    assert_equal(str(device['available']), 'False')
-		    count += 1
-	    assert_equal(count,delete)
-	    self.cliExit()
-	    net.stop()
-        except Exception as Error:
-            log.info('Got unexpected error %s while creating topology'%Error)
-            Cleanup.cleanup()
-            raise
-        Cleanup.cleanup()
diff --git a/src/test/monitoring/__init__.py b/src/test/monitoring/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/monitoring/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/monitoring/monitoringTest.py b/src/test/monitoring/monitoringTest.py
deleted file mode 100644
index 46b54d6..0000000
--- a/src/test/monitoring/monitoringTest.py
+++ /dev/null
@@ -1,345 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import time
-import os, subprocess
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from OnosCtrl import OnosCtrl
-from CordTestUtils import log_test as log
-from CordContainer import *
-from docker import Client
-import json
-import requests
-log.setLevel('INFO')
-
-class monitoring_exchange(unittest.TestCase):
-
-    controllers = os.getenv('ONOS_CONTROLLER_IP', '').split(',')
-    onosLogLevel = 'INFO'
-    test_host_base = 'cord-tester1'#Hardcoded temporarily
-    collectd_app = 'org.onosproject.cpman'
-    testHostName = os.getenv('TEST_HOST', test_host_base)
-    testLogLevel = os.getenv('LOG_LEVEL', onosLogLevel)
-    stat_optionList = os.getenv('USER_OPTIONS', '').split(',')
-    serverOptionsList = os.getenv('EXTERNAL_SERVER_OPTIONS', None)
-    CBENCH_TIMEOUT = 60
-
-    @classmethod
-    def setUpClass(cls):
-        onos_ctrl = OnosCtrl('org.onosproject.cpman')
-        status, _ = onos_ctrl.activate()
-
-    @classmethod
-    def tearDownClass(cls):
-        onos_ctrl = OnosCtrl('org.onosproject.cpman')
-        status, _ = onos_ctrl.deactivate()
-
-    @classmethod
-    def stat_option(cls, stats = None, serverDetails = None):
-        # each stats option we can do some specific functions
-        if stats is None:
-           stats = cls.stat_optionList
-        if serverDetails is None:
-           serverDetails = cls.serverOptionsList
-        stats_choice = 'COLLECTD'
-        test_name = cls.testHostName
-        test_image = 'cordtest/nose'
-        if stats_choice in stats:
-           onos_ctrl = OnosCtrl('org.onosproject.cpman')
-           status, _ = onos_ctrl.activate()
-           if serverDetails is '':
-              pass
-           elif serverDetails in 'NEW':
-                test_image = 'cord-test/exserver'
-                test_name ='cord-collectd'
-           else:
-               pass
-               ## TO-DO for already up and running server, install collectd agent etc...
-           cls.start_collectd_agent_in_server(name = test_name, image = test_image)
-        return
-
-
-    @classmethod
-    def collectd_agent_metrics(cls,controller=None, auth =None, url = None):
-        '''This function is getting a rules from ONOS with json formate'''
-        if url:
-           resp = requests.get(url, auth = auth)
-           log.info('CollectD agent has provided metrics via ONOS controller, \nurl = %s \nand stats = %s \nResponse = %s ' %(url,resp.json(),resp.ok))
-           assert_equal(resp.ok, True)
-        return resp
-
-
-    @classmethod
-    def start_collectd_agent_in_server(cls, name = None, image = None):
-        container_cmd_exec = Container(name = name, image = image)
-        tty = False
-        dckr = Client()
-        cmd =  'sudo /etc/init.d/collectd start'
-        i = container_cmd_exec.execute(cmd = cmd, tty= tty, stream = True, shell = False)
-        return i
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_installation(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            cmd = 'sudo /etc/init.d/collectd start'
-            output = subprocess.check_output(cmd,shell= True)
-            if 'Starting statistics collectio' in output:
-               log.info('Collectd is installed properly')
-               pass
-            else:
-               log.info('Collectd is not installed properly')
-               assert_equal(False, True)
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_plugin_and_onos_installation(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            cmd = 'ls'
-            output = subprocess.check_output(cmd,shell= True)
-            if 'write_onos' in output:
-               log.info('Collectd is installed properly and plugin happend to ONOS')
-               pass
-            else:
-               log.info('Collectd is not installed properly and no plugin happend to ONOS')
-               assert_equal(False, True)
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_cpu_stats(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            self.stat_option()
-            for controller in self.controllers:
-               if not controller:
-                  continue
-            url_cpu_stats =  'http://%s:8181/onos/cpman/controlmetrics/cpu_metrics'%(controller)
-            auth = ('karaf', 'karaf')
-            self.collectd_agent_metrics(controller, auth, url = url_cpu_stats)
-            log.info('Successfully CPU metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_mem_stats(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            self.stat_option()
-            for controller in self.controllers:
-               if not controller:
-                  continue
-            url_mem_stats =  'http://%s:8181/onos/cpman/controlmetrics/memory_metrics'%(controller)
-            auth = ('karaf', 'karaf')
-            self.collectd_agent_metrics(controller, auth, url = url_mem_stats)
-            log.info('Successfully memory metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_control_metrics_messages(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            self.stat_option()
-            for controller in self.controllers:
-               if not controller:
-                  continue
-            url_messages_stats =  'http://%s:8181/onos/cpman/controlmetrics/messages'%(controller)
-            auth = ('karaf', 'karaf')
-            self.collectd_agent_metrics(controller, auth, url = url_messages_stats)
-            log.info('Successfully messages are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_network_metrics_stats(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            self.stat_option()
-            for controller in self.controllers:
-               if not controller:
-                  continue
-            url_network_stats =  'http://%s:8181/onos/cpman/controlmetrics/network_metrics'%(controller)
-            auth = ('karaf', 'karaf')
-            self.collectd_agent_metrics(controller, auth, url = url_network_stats)
-            log.info('Successfully network metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_network_metrics_stats(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            self.stat_option()
-            for controller in self.controllers:
-               if not controller:
-                  continue
-            url_network_stats =  'http://%s:8181/onos/cpman/controlmetrics/disk_metrics'%(controller)
-            auth = ('karaf', 'karaf')
-            self.collectd_agent_metrics(controller, auth, url = url_network_stats)
-            log.info('Successfully disk metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_for_installing_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               test_image = 'cord-test/exserver'
-               test_name ='cord-collectd'
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               ## starting collectd agent on new container
-               cmd = 'sudo /etc/init.d/collectd start'
-               output = self.start_collectd_agent_in_server(name = test_name, image = test_image)
-               if output == 0:
-                  log.info('Collectd is installed properly on new container')
-                  pass
-               else:
-                  log.info('Collectd is not installed properly on new container')
-                  assert_equal(False, True)
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_for_cpu_metrics_on_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               self.stat_option()
-               for controller in self.controllers:
-                   if not controller:
-                      continue
-               url_cpu_stats =  'http://%s:8181/onos/cpman/controlmetrics/cpu_metrics'%(controller)
-               auth = ('karaf', 'karaf')
-               self.collectd_agent_metrics(controller, auth, url = url_cpu_stats)
-               log.info('Successfully CPU metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_memory_metrics_on_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               self.stat_option()
-               for controller in self.controllers:
-                   if not controller:
-                      continue
-               url_mem_stats =  'http://%s:8181/onos/cpman/controlmetrics/memory_metrics'%(controller)
-               auth = ('karaf', 'karaf')
-               self.collectd_agent_metrics(controller, auth, url = url_mem_stats)
-               log.info('Successfully memory metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_get_messages_on_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               self.stat_option()
-               for controller in self.controllers:
-                   if not controller:
-                      continue
-               url_messages_stats =  'http://%s:8181/onos/cpman/controlmetrics/messages'%(controller)
-               auth = ('karaf', 'karaf')
-               self.collectd_agent_metrics(controller, auth, url = url_messages_stats)
-               log.info('Successfully messages metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_network_metrics_on_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               self.stat_option()
-               for controller in self.controllers:
-                   if not controller:
-                      continue
-               url_network_stats =  'http://%s:8181/onos/cpman/controlmetrics/network_metrics'%(controller)
-               auth = ('karaf', 'karaf')
-               self.collectd_agent_metrics(controller, auth, url = url_network_stats)
-               log.info('Successfully network metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
-
-    @deferred(CBENCH_TIMEOUT)
-    def test_stats_with_collectd_disk_metrics_on_new_container(self):
-        df = defer.Deferred()
-        def collectd_sample(df):
-            if 'NEW' in self.serverOptionsList:
-               ## stopping collectd agent on test container if any
-               cmd = 'sudo /etc/init.d/collectd stop'
-               output = os.system(cmd)
-               self.stat_option()
-               for controller in self.controllers:
-                   if not controller:
-                      continue
-               url_disk_stats =  'http://%s:8181/onos/cpman/controlmetrics/disk_metrics'%(controller)
-               auth = ('karaf', 'karaf')
-               self.collectd_agent_metrics(controller, auth, url = url_disk_stats)
-               log.info('Successfully network metrics are retained by the stats')
-            df.callback(0)
-        reactor.callLater(0, collectd_sample, df)
-        return df
diff --git a/src/test/netCondition/__init__.py b/src/test/netCondition/__init__.py
deleted file mode 100644
index 038b5c8..0000000
--- a/src/test/netCondition/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap b/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap
deleted file mode 100644
index 1407801..0000000
--- a/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap
+++ /dev/null
Binary files differ
diff --git a/src/test/netCondition/netConditionTest.py b/src/test/netCondition/netConditionTest.py
deleted file mode 100644
index 34fc883..0000000
--- a/src/test/netCondition/netConditionTest.py
+++ /dev/null
@@ -1,2431 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from threading import Timer
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from scapy.all import *
-import time, monotonic
-import os, sys
-import tempfile
-import random
-import Queue
-import threading
-from IGMP import *
-from McastTraffic import *
-from Stats import Stats
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from Channels import IgmpChannel
-from EapTLS import TLSAuthTest
-from scapy_ssl_tls.ssl_tls import *
-from scapy_ssl_tls.ssl_tls_crypto import *
-from EapolAAA import *
-from Enum import *
-import noseTlsAuthHolder as tlsAuthHolder
-from tls_cert import Key
-from socket import *
-from CordTestServer import cord_test_radius_restart
-import struct
-import scapy
-from CordTestBase import CordTester
-from CordContainer import *
-from CordLogger import CordLogger
-from CordTestUtils import log_test
-import re
-from random import randint
-from time import sleep
-import json
-from OnosFlowCtrl import OnosFlowCtrl
-from OltConfig import OltConfig
-from threading import current_thread
-import collections
-log_test.setLevel('INFO')
-
-class IGMPTestState:
-
-      def __init__(self, groups = [], df = None, state = 0):
-            self.df = df
-            self.state = state
-            self.counter = 0
-            self.groups = groups
-            self.group_map = {} ##create a send/recv count map
-            for g in groups:
-                self.group_map[g] = (Stats(), Stats())
-
-      def update(self, group, tx = 0, rx = 0, t = 0):
-            self.counter += 1
-            index = 0 if rx == 0 else 1
-            v = tx if rx == 0 else rx
-            if self.group_map.has_key(group):
-                  self.group_map[group][index].update(packets = v, t = t)
-
-      def update_state(self):
-          self.state = self.state ^ 1
-
-class netCondition_exchange(CordLogger):
-
-    V_INF1 = 'veth0'
-    V_INF2 = 'veth1'
-    MGROUP1 = '239.1.2.3'
-    MGROUP2 = '239.2.2.3'
-    MINVALIDGROUP1 = '255.255.255.255'
-    MINVALIDGROUP2 = '239.255.255.255'
-    MMACGROUP1 = "01:00:5e:01:02:03"
-    MMACGROUP2 = "01:00:5e:02:02:03"
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.0.22'
-    NEGATIVE_TRAFFIC_STATUS = 1
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-    IGMP_TEST_TIMEOUT = 5
-    IGMP_QUERY_TIMEOUT = 60
-    MCAST_TRAFFIC_TIMEOUT = 10
-    TEST_TIMEOUT_DELAY = 340
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    max_packets = 100
-    app_igmp = 'org.opencord.igmp'
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
-    ROVER_TEST_TIMEOUT = 10 #3600*86
-    ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
-    ROVER_JOIN_TIMEOUT = 60
-
-    app_tls = 'org.opencord.aaa'
-    TLS_TIMEOUT = 20
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAM6l2jUG56pLMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
-ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjAzMTEx
-ODUzMzVaFw0xNzAzMDYxODUzMzVaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
-Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAL9Jv54TkqycL3U2Fdd/y5NXdnPVXwAVV3m6I3eIffVCv8eS+mwlbl9dnbjo
-qqlGEgA3sEg5HtnKoW81l3PSyV/YaqzUzbcpDlgWlbNkFQ3nVxh61gSU34Fc4h/W
-plSvCkwGSbV5udLtEe6S9IflP2Fu/eXa9vmUtoPqDk66p9U/nWVf2H1GJy7XanWg
-wke+HpQvbzoSfPJS0e5Rm9KErrzaIkJpqt7soW+OjVJitUax7h45RYY1HHHlbMQ0
-ndWW8UDsCxFQO6d7nsijCzY69Y8HarH4mbVtqhg3KJevxD9UMRy6gdtPMDZLah1c
-LHRu14ucOK4aF8oICOgtcD06auUCAwEAAaOCASwwggEoMB0GA1UdDgQWBBQwEs0m
-c8HARTVp21wtiwgav5biqjCBwAYDVR0jBIG4MIG1gBQwEs0mc8HARTVp21wtiwga
-v5biqqGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
-EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDOpdo1BueqSzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAK+fyAFO8CbH35P5mOX+5wf7+AeC+5pwaFcoCV0zlfwniANp
-jISgcIX9rcetLxeYRAO5com3+qLdd9dGVNL0kwufH4QhlSPErG7OLHHAs4JWVhUo
-bH3lK9lgFVlnCDBtQhslzqScR64SCicWcQEjv3ZMZsJwYLvl8unSaKz4+LVPeJ2L
-opCpmZw/V/S2NhBbe3QjTiRPmDev2gbaO4GCfi/6sCDU7UO3o8KryrkeeMIiFIej
-gfwn9fovmpeqCEyupy2JNNUTJibEuFknwx7JAX+htPL27nEgwV1FYtwI3qLiZqkM
-729wo9cFSslJNZBu+GsBP5LszQSuvNTDWytV+qY=
------END CERTIFICATE-----'''
-
-    def onos_aaa_config(self):
-        aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
-                                                                'radiusIp': '172.17.0.2' } } } }
-        radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
-        aaa_dict['apps']['org.opencord.aaa']['AAA']['radiusIp'] = radius_ip
-        self.onos_ctrl.activate()
-        time.sleep(2)
-        self.onos_load_tls_config(aaa_dict)
-
-    def onos_load_tls_config(self, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('Configure request for AAA returned status %d' %code)
-            assert_equal(status, True)
-            time.sleep(3)
-
-    @classmethod
-    def setUpClass(cls):
-          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-          cls.port_map, _ = cls.olt.olt_port_map()
-          OnosCtrl.cord_olt_config(cls.olt)
-          cls.device_id = OnosCtrl.get_device_id()
-
-    @classmethod
-    def tearDownClass(cls): pass
-
-    def setUp_igmp(self):
-        ''' Activate the igmp app'''
-        apps = self.app_igmp
-        self.onos_ctrl = OnosCtrl(apps)
-        self.onos_aaa_config()
-	self.onos_ctrl.activate()
-        self.igmp_channel = IgmpChannel()
-
-    def setUp_tls(self):
-        ''' Activate the aaa app'''
-        apps = self.app_tls
-        self.onos_ctrl = OnosCtrl(apps)
-        self.onos_aaa_config()
-
-    def tearDown(self):
-        '''Deactivate the dhcp app'''
-        apps = [self.app_igmp, self.app_tls]
-        for app in apps:
-            onos_ctrl = OnosCtrl(app)
-            onos_ctrl.deactivate()
-
-    def onos_load_igmp_config(self, config):
-	log_test.info('onos load config is %s'%config)
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-    def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
-          ssm_dict = {'apps' : { 'org.onosproject.igmp' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.onosproject.igmp']['ssmTranslate']
-	  if flag: #to maintain seperate group-source pair.
-	      for i in range(len(groups)):
-		  d = {}
-		  d['source'] = src_list[i] or '0.0.0.0'
-		  d['group'] = groups[i]
-		  ssm_xlate_list.append(d)
-	  else:
-              for g in groups:
-                  for s in src_list:
-                      d = {}
-                      d['source'] = s or '0.0.0.0'
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-          self.onos_load_igmp_config(ssm_dict)
-          cord_port_map = {}
-          for g in groups:
-                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
-          self.igmp_channel.cord_port_table_load(cord_port_map)
-          time.sleep(2)
-
-    def mcast_ip_range(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def random_mcast_ip(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def source_ip_range(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def get_igmp_intf(self):
-        inst = os.getenv('TEST_INSTANCE', None)
-        if not inst:
-            return 'veth0'
-        inst = int(inst) + 1
-        if inst >= self.port_map['uplink']:
-            inst += 1
-        if self.port_map.has_key(inst):
-              return self.port_map[inst]
-        return 'veth0'
-
-    def igmp_verify_join(self, igmpStateList):
-        sendState, recvState = igmpStateList
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            tx = tx_stats.count
-            assert_greater(tx, 0)
-            rx_stats = recvState.group_map[g][1]
-            rx = rx_stats.count
-            assert_greater(rx, 0)
-            log_test.info('Receive stats %s for group %s' %(rx_stats, g))
-
-        log_test.info('IGMP test verification success')
-
-    def igmp_verify_leave(self, igmpStateList, leave_groups):
-        sendState, recvState = igmpStateList[0], igmpStateList[1]
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            rx_stats = recvState.group_map[g][1]
-            tx = tx_stats.count
-            rx = rx_stats.count
-            assert_greater(tx, 0)
-            if g not in leave_groups:
-                log_test.info('Received %d packets for group %s' %(rx, g))
-        for g in leave_groups:
-            rx = recvState.group_map[g][1].count
-            assert_equal(rx, 0)
-
-        log_test.info('IGMP test verification success')
-
-    def mcast_traffic_timer(self):
-          self.mcastTraffic.stopReceives()
-
-    def send_mcast_cb(self, send_state):
-        for g in send_state.groups:
-            send_state.update(g, tx = 1)
-        return 0
-
-    ##Runs in the context of twisted reactor thread
-    def igmp_recv(self, igmpState, iface = 'veth0'):
-        p = self.recv_socket.recv()
-        try:
-              send_time = float(p.payload.load)
-              recv_time = monotonic.monotonic()
-        except:
-              log_test.info('Unexpected Payload received: %s' %p.payload.load)
-              return 0
-        #log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
-        igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
-        return 0
-
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1, ip_src = None):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              if ip_src is None:
-                 ip_pkt = self.igmp_eth/self.igmp_ip
-              else:
-                 igmp_ip_src = IP(dst = self.IP_DST, src = ip_src)
-                 ip_pkt = self.igmp_eth/igmp_ip_src
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface=iface)
-        if delay != 0:
-            time.sleep(delay)
-
-
-    def send_igmp_join_negative(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1, ip_src = None, invalid_igmp_join = None ):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        if invalid_igmp_join == 'igmp_type':
-              igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT_NEGATIVE, max_resp_code=30,
-                            gaddr=self.IP_DST)
-        else:
-              igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        if invalid_igmp_join == 'record_type':
-           record_type = IGMP_V3_GR_TYPE_INCLUDE_NEGATIVE
-
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              if ip_src is None:
-                 ip_pkt = self.igmp_eth/self.igmp_ip
-              else:
-                 igmp_ip_src = IP(dst = self.IP_DST, src = ip_src)
-                 ip_pkt = self.igmp_eth/igmp_ip_src
-        pkt = ip_pkt/igmp
-        if invalid_igmp_join == 'ttl':
-           set_ttl = 10
-           IGMPv3.fixup(pkt,invalid_ttl = set_ttl)
-        else:
-           IGMPv3.fixup(pkt)
-        sendp(pkt, iface=iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-        self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
-              gr.sources = src_list
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        if rec_queryCount == None:
-            log_test.info('Sending IGMP join for group %s and waiting for one query packet and printing the packet' %groups)
-            resp = srp1(pkt, iface=iface)
-        else:
-            log_test.info('Sending IGMP join for group %s and waiting for periodic query packets and printing one packet' %groups)
-            resp = srp1(pkt, iface=iface)
-        resp[0].summary()
-        log_test.info('Sent IGMP join for group %s and received a query packet and  printing packet' %groups)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-	log_test.info('entering into igmp leave function')
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface = iface)
-        if delay != 0:
-            time.sleep(delay)
-
-    def send_igmp_leave_listening_group_specific_query(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        log_test.info('Sending IGMP leave for group %s and waiting for one group specific query packet and printing the packet' %groups)
-        resp = srp1(pkt, iface=iface)
-        resp[0].summary()
-        log_test.info('Sent IGMP leave for group %s and received a group specific query packet and printing packet' %groups)
-        if delay != 0:
-            time.sleep(delay)
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+390)
-    def test_netCondition_with_delay_between_igmp_join_and_data_recv(self):
-        self.setUp_igmp()
-        randomDelay = randint(10,300)
-        groups = ['224.0.1.1', '225.0.0.1']
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def mcast_traffic_delay_start():
-            mcastTraffic.start()
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                result = self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_join(stateList)
-                self.df.callback(0)
-
-        self.send_igmp_join(groups)
-        log_test.info('Holding multicast data for a period of random delay = {} secs'.format(randomDelay))
-        t = Timer(randomDelay, mcast_traffic_delay_start)
-        t.start()
-
-        self.test_timer = reactor.callLater(randomDelay+30, self.mcast_traffic_timer)
-        reactor.callLater(randomDelay+10, igmp_srp_task, igmpStateList)
-        return df
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+390)
-    def test_netCondition_with_delay_between_data_recv_and_igmp_join(self):
-        self.setUp_igmp()
-        randomDelay = randint(10,300)
-        groups = ['224.0.1.1', '225.0.0.1']
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def mcast_join_delay_start():
-            log_test.info('Holding channel join for a period of random delay = {} secs'.format(randomDelay))
-            self.send_igmp_join(groups)
-
-        def igmp_srp_task(stateList):
-            igmpSendState, igmpRecvState = stateList
-            if not mcastTraffic.isRecvStopped():
-                result = self.igmp_recv(igmpRecvState)
-                reactor.callLater(0, igmp_srp_task, stateList)
-            else:
-                self.mcastTraffic.stop()
-                self.recv_socket.close()
-                self.igmp_verify_join(stateList)
-                self.df.callback(0)
-
-        mcastTraffic.start()
-        t = Timer(randomDelay, mcast_join_delay_start)
-        t.start()
-
-        self.test_timer = reactor.callLater(randomDelay+30, self.mcast_traffic_timer)
-        reactor.callLater(randomDelay+10, igmp_srp_task, igmpStateList)
-        return df
-
-
-    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+340)
-    def test_netCondition_with_delay_between_igmp_leave_and_data(self):
-        self.setUp_igmp()
-        randomDelay = randint(10,300)
-        groups = ['224.0.1.10', '225.0.0.10']
-        leave_groups = ['224.0.1.10']
-	self.onos_ssm_table_load(groups)
-        df = defer.Deferred()
-        igmpState = IGMPTestState(groups = groups, df = df)
-        igmpStateRecv = IGMPTestState(groups = groups, df = df)
-        igmpStateList = (igmpState, igmpStateRecv)
-        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb,
-                                    arg = igmpState)
-        self.df = df
-        self.mcastTraffic = mcastTraffic
-        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
-
-        def mcast_leave_delay_start():
-	    self.send_igmp_leave(leave_groups, delay = 3)
-	    join_state = IGMPTestState(groups = leave_groups)
-	    status = self.igmp_not_recv_task(self.V_INF1,leave_groups, join_state)
-	    log_test.info('Verified status for igmp recv task %s'%status)
-	    assert status == 1 , 'EXPECTED RESULT'
-	    self.df.callback(0)
-
-	mcastTraffic.start()
-	self.send_igmp_join(groups)
-        log_test.info('Holding multicast leave packet for a period of random delay = {} secs'.format(randomDelay))
-        t = Timer(randomDelay+10, mcast_leave_delay_start)
-        t.start()
-        return df
-
-    def igmp_not_recv_task(self, intf, groups, join_state):
-	  log_test.info('Entering igmp not recv task loop')
-          recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
-          group_map = {}
-          for g in groups:
-                group_map[g] = [0,0]
-
-          log_test.info('Verifying join interface should not receive any multicast data')
-          self.NEGATIVE_TRAFFIC_STATUS = 1
-          def igmp_recv_cb(pkt):
-                log_test.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
-                self.NEGATIVE_TRAFFIC_STATUS = 2
-          sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: IP in p and p[IP].dst in groups,
-                timeout = 3, opened_socket = recv_socket)
-          recv_socket.close()
-          return self.NEGATIVE_TRAFFIC_STATUS
-
-    ## Its sample test case based on this test case we had added all below scenarios.
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_delay_between_positive_IdReq_and_tlsHelloReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_delay_between_IdReq_and_tlsHelloReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+100)
-    def test_netCondition_in_eap_tls_with_delay_between_tlsHelloReq_and_eapTlsCertReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsCertReq_pkt_delay():
-            log_test.info('Holding eapTlsCertReq packet for a period of random delay = {} secs'.format(randomDelay))
-            tls._eapTlsCertReq_delay()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            while tls.server_hello_done_received == False:
-               r = tls.eapol_scapy_recv(cb = tls.eapol_server_hello_cb,
-                                      lfilter =
-                                      lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
-                                          pkt[EAP].code == EAP.REQUEST)
-               if len(r) == 0:
-                  tls.tlsFail()
-            t = Timer(randomDelay, eap_tls_eapTlsCertReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_delay_between_TlsCertReq_and_TlsChangeCipherSpec(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_TlsChangeCipherSpec_pkt_delay():
-            log_test.info('Holding TlsChangeCipherSpec packet for a period of random delay = {} secs'.format(randomDelay))
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            t = Timer(randomDelay, eap_tls_TlsChangeCipherSpec_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_no_cert_and_delay_between_IdReq_and_HelloReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_no_cert_cb():
-            log_test.info('TLS authentication failed with no certificate')
-        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            log_test.info('Holding HelloReq packet with no cert for a period of random delay = {} secs'.format(randomDelay))
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_no_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_no_cert, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+100)
-    def test_netCondition_in_eap_tls_with_delay_and_no_cert_between_tlsHelloReq_and_eapTlsCertReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_no_cert_cb():
-            log_test.info('TLS authentication failed with no certificate')
-        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            log_test.info('Holding eapTlsCertReq packet with no cert for a period of random delay = {} secs'.format(randomDelay))
-            tls._eapTlsCertReq_delay()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsChangeCipherSpec()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_no_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            while tls.server_hello_done_received == False:
-               r = tls.eapol_scapy_recv(cb = tls.eapol_server_hello_cb,
-                                      lfilter =
-                                      lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
-                                          pkt[EAP].code == EAP.REQUEST)
-               if len(r) == 0:
-                  tls.tlsFail()
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_no_cert, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_delay_and_no_cert_between_TlsCertReq_and_TlsChangeCipherSpec(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_no_cert_cb():
-            log_test.info('TLS authentication failed with no certificate')
-        tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
-        def eap_tls_TlsChangeCipherSpec_pkt_delay():
-            tls._eapTlsChangeCipherSpec()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_no_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            log_test.info('Holding TlsChangeCipherSpec packet with no cert for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_TlsChangeCipherSpec_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_no_cert, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_invalid_cert_and_delay_between_IdReq_and_HelloReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_invalid_cert_cb():
-            log_test.info('TLS authentication failed with invalid certificate')
-        tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb, client_cert = self.CLIENT_CERT_INVALID)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_invalid_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding HelloReq packet with invalid cert for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_invalid_cert, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+100)
-    def test_netCondition_in_eap_tls_with_invalid_cert_and_delay_between_tlsHelloReq_and_eapTlsCertReq(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_invalid_cert_cb():
-            log_test.info('TLS authentication failed with invalid certificate')
-        tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb, client_cert = self.CLIENT_CERT_INVALID)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            log_test.info('Holding eapTlsCertReq packet with invalid cert for a period of random delay = {} sec, delay'.format(randomDelay))
-            tls._eapTlsCertReq_delay()
-            tls._eapTlsChangeCipherSpec()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_invalid_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            while tls.server_hello_done_received == False:
-               r = tls.eapol_scapy_recv(cb = tls.eapol_server_hello_cb,
-                                      lfilter =
-                                      lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
-                                          pkt[EAP].code == EAP.REQUEST)
-               if len(r) == 0:
-                  tls.tlsFail()
-
-            log_test.info('Holding eapTlsCertReq packet with invalid cert for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_invalid_cert, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eap_tls_with_invalid_cert_delay_between_TlsCertReq_and_TlsChangeCipherSpec(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_invalid_cert_cb():
-            log_test.info('TLS authentication failed with invalid certificate')
-        tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb, client_cert = self.CLIENT_CERT_INVALID)
-        def eap_tls_TlsChangeCipherSpec_pkt_delay():
-            tls._eapTlsChangeCipherSpec()
-            assert_equal(tls.failTest, True)
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_invalid_cert(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            log_test.info('Holding TlsChangeCipherSpec packet with invalid cert for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_TlsChangeCipherSpec_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_invalid_cert, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_multiple_eap_tls_requests_with_delay_between_IdReq_and_HelloReq(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 10
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-                time.sleep(randomDelay)
-                tls._eapTlsHelloReq()
-                tls._eapTlsCertReq()
-                tls._eapTlsChangeCipherSpec()
-                tls._eapTlsFinished()
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           time.sleep(300)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+450)
-    def test_netCondition_with_multiple_authentication_and_delay_between_complete_authentication(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                tls._eapTlsHelloReq()
-                tls._eapTlsCertReq()
-                tls._eapTlsChangeCipherSpec()
-                tls._eapTlsFinished()
-                log_test.info('Authentication successful for user %d'%i)
-           # Client authendicating multiple times one after other and making random delay in between authendication.
-           for i in xrange(clients):
-             multiple_tls_random_delay()
-             time.sleep(randomDelay)
-           df.callback(0)
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+450)
-    def test_netCondition_with_multiple_authentication_and_delay_between_every_100_tls_burst(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        threads = []
-        tls = []
-        clients = 10
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                for x in xrange(clients):
-                   tls.append(TLSAuthTest(src_mac = 'random'))
-                for x in xrange(clients):
-                   tls[x]._eapSetup()
-                   tls[x].tlsEventTable.EVT_EAP_SETUP
-                for x in xrange(clients):
-                   tls[x]._eapStart()
-                   tls[x].tlsEventTable.EVT_EAP_START
-                for x in xrange(clients):
-                   tls[x]._eapIdReq()
-                   tls[x].tlsEventTable.EVT_EAP_ID_REQ
-                for x in xrange(clients):
-                   tls[x]._eapTlsHelloReq()
-                for x in xrange(clients):
-                   tls[x]._eapTlsCertReq()
-                for x in xrange(clients):
-                   tls[x]._eapTlsChangeCipherSpec()
-                for x in xrange(clients):
-                   tls[x]._eapTlsFinished()
-                for x in xrange(clients):
-                   log_test.info('Authentication successful for user %d'%i)
-           # Client authendicating multiple times one after other and making random delay in between authendication.
-           for i in xrange(2):
-             multiple_tls_random_delay()
-             time.sleep(randomDelay)
-           df.callback(0)
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+90)
-    def test_netCondition_with_delay_between_mac_flow_and_traffic(self):
-        df = defer.Deferred()
-        randomDelay = randint(10,300)
-        egress = 1
-        ingress = 2
-        egress_mac = '00:00:00:00:00:01'
-        ingress_mac = '00:00:00:00:00:02'
-        pkt = Ether(src = ingress_mac, dst = egress_mac)/IP()
-        self.success = False
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
-                self.success = True
-            sniff(count=2, timeout=randomDelay+50, lfilter = lambda p: p.src == ingress_mac,
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        thread = threading.Thread(target = mac_recv_task)
-
-        def send_flow_pkt_delay():
-            sendp(pkt, count=50, iface = self.port_map[ingress])
-            thread.join()
-            assert_equal(self.success, True)
-            df.callback(0)
-
-        def creating_mac_flow(df):
-
-            flow = OnosFlowCtrl(deviceId = self.device_id,
-                               egressPort = egress,
-                               ingressPort = ingress,
-                               ethSrc = ingress_mac,
-                               ethDst = egress_mac)
-            result = flow.addFlow()
-            assert_equal(result, True)
-            ##wait for flows to be added to ONOS
-            time.sleep(1)
-            thread.start()
-            log_test.info('Holding a packet to verify if flows are  active after {} secs'.format(randomDelay))
-            t = Timer(randomDelay, send_flow_pkt_delay)
-            t.start()
-        reactor.callLater(0, creating_mac_flow, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY+90)
-    def test_netCondition_with_delay_between_ip_flow_and_traffic(self):
-        df = defer.Deferred()
-        randomDelay = randint(10,300)
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        pkt = L2/L3
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True
-            sniff(count=2, timeout= randomDelay + 30,
-                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip'],
-                  prn = recv_cb, iface = self.port_map[egress])
-
-        thread = threading.Thread(target = mac_recv_task)
-
-        def send_flow_ip_pkt_delay():
-            sendp(pkt, count=50, iface = self.port_map[ingress])
-            thread.join()
-            assert_equal(self.success, True)
-            df.callback(0)
-
-        def creating_ip_flow(df):
-            flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress,
-                                ingressPort = ingress,
-                                ethType = '0x0800',
-                                ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
-                                ipDst = ('IPV4_DST', egress_map['ip']+'/32')
-                               )
-            result = flow.addFlow()
-            assert_equal(result, True)
-            ##wait for flows to be added to ONOS
-            time.sleep(1)
-            self.success = False
-            ##wait for flows to be added to ONOS
-            time.sleep(1)
-            thread.start()
-            log_test.info('Holding a packet to verify if flows are  active after {} secs'.format(randomDelay))
-            t = Timer(randomDelay, send_flow_ip_pkt_delay)
-            t.start()
-        reactor.callLater(0, creating_ip_flow, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+90)
-    def test_netCondition_with_delay_between_tcp_port_flow_and_traffic(self):
-        df = defer.Deferred()
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': 9500 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': 9000 }
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        L4 = TCP(sport = ingress_map['tcp_port'], dport = egress_map['tcp_port'])
-        pkt = L2/L3/L4
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-                self.success = True
-            sniff(count=2, timeout= randomDelay+30, lfilter = lambda p: TCP in p and p[TCP].dport == egress_map['tcp_port']
-                        and p[TCP].sport == ingress_map['tcp_port'], prn = recv_cb, iface = self.port_map[egress])
-
-        thread = threading.Thread(target = mac_recv_task)
-
-        def send_flow_tcp_pkt_delay():
-            sendp(pkt, count=50, iface = self.port_map[ingress])
-            thread.join()
-            assert_equal(self.success, True)
-
-        def creating_tcp_flow(df):
-            flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress,
-                                ingressPort = ingress,
-                                tcpSrc = ingress_map['tcp_port'],
-                                tcpDst = egress_map['tcp_port']
-                                )
-            result = flow.addFlow()
-            assert_equal(result, True)
-            ##wait for flows to be added to ONOS
-            time.sleep(1)
-            self.success = False
-            thread.start()
-            log_test.info('Holding a packet to verify if flows are active after {} sec, delay'.format(randomDelay))
-            t = Timer(randomDelay, send_flow_tcp_pkt_delay)
-            t.start()
-        df.callback(0)
-        reactor.callLater(0, creating_tcp_flow, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+90)
-    def test_netCondition_with_delay_between_udp_port_flow_and_traffic(self):
-        df = defer.Deferred()
-        randomDelay = randint(10,300)
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': 9500 }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': 9000 }
-        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-        L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
-        pkt = L2/L3/L4
-
-        def mac_recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-                self.success = True
-            sniff(count=2, timeout=randomDelay + 30,
-             lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
-                                and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.port_map[egress])
-
-        thread = threading.Thread(target = mac_recv_task)
-
-        def send_flow_udp_pkt_delay():
-            sendp(pkt, count=50, iface = self.port_map[ingress])
-            thread.join()
-            assert_equal(self.success, True)
-            df.callback(0)
-
-        def creating_udp_flow(df):
-            flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress,
-                                ingressPort = ingress,
-                                udpSrc = ingress_map['udp_port'],
-                                udpDst = egress_map['udp_port']
-                               )
-            result = flow.addFlow()
-            assert_equal(result, True)
-            ##wait for flows to be added to ONOS
-            time.sleep(1)
-            self.success = False
-            thread.start()
-            log_test.info('Holding a packet to verify if flows are active after {} secs'.format(randomDelay))
-            t = Timer(randomDelay, send_flow_udp_pkt_delay)
-            t.start()
-
-        df.callback(0)
-        reactor.callLater(0, creating_udp_flow, df)
-        return df
-
-    def netCondition_with_delay_between_multiple_igmp_joins_and_data(self,users,group_end_ip,source_list_end_ip,user_src_end_ip, data_pkt =50):
-        self.setUp_igmp()
-        randomDelay = []
-        groups = []
-        sources = []
-        subscribers_src_ip = []
-        status = []
-        join_threads = []
-        delay_threads = []
-        data_threads = []
-        threads = []
-        subscriber = users
-        count = 1
-        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = group_end_ip)
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = source_list_end_ip)
-        subscriber_sourceips = self.source_ip_range(start_ip = '20.20.0.1',end_ip = user_src_end_ip)
-        while count<=subscriber:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            subscriber_sourceip = random.choice(subscriber_sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group = %s source list = %s and subscriber source ip in join = %s'%(group,source, subscriber_sourceip))
-                groups.append(group)
-                sources.append(source)
-                subscribers_src_ip.append(subscriber_sourceip)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-
-        def multiple_joins_send_in_threads(group, source, subscriber_src_ip,data_pkt = data_pkt):
-            self.send_igmp_join(groups = [group], src_list = [source],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1, ip_src = [subscriber_src_ip])
-            randomDelay_in_thread = randint(10,30)
-            log_test.info('This is running in a thread, with  igmp join sent and delay {}'.format(randomDelay_in_thread))
-            time.sleep(randomDelay_in_thread)
-            log_test.info('This is running in a thread, with igmp join sent and delay {}'.format(randomDelay_in_thread))
-            status = self.verify_igmp_data_traffic_in_thread(group,intf=self.V_INF1,source=source, data_pkt = data_pkt)
-            log_test.info('Data received for group %s from source %s and status is %s '%(group,source,status))
-            self.igmp_threads_result.append(status)
-
-        for i in range(subscriber):
-            thread = threading.Thread(target = multiple_joins_send_in_threads, args = (groups[i], sources[i], subscribers_src_ip[i]))
-            time.sleep(randint(1,2))
-            thread.start()
-            threads.append(thread)
-
-        for thread in threads:
-            thread.join()
-
-    def verify_igmp_data_traffic_in_thread(self, group, intf='veth0', source='1.2.3.4', data_pkt =50, negative = None):
-        log_test.info('Verifying multicast traffic for group %s from source %s'%(group,source))
-        self.success = False
-        def recv_task():
-            def igmp_recv_cb(pkt):
-                #log_test.info('received multicast data packet is %s'%pkt.show())
-                log_test.info('Multicast data received for group %s from source %s'%(group,source))
-                self.success = True
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
-        t = threading.Thread(target = recv_task)
-        t.start()
-        self.send_multicast_data_traffic_from_thread(group,source=source, data_pkt=data_pkt)
-        t.join()
-        if (negative is None) and self.success is True:
-           return self.success
-        elif (negative is not None) and self.success is True:
-           log_test.info('Multicast traffic should not received because this is negative scenario, but it is received')
-           self.success = False
-        elif (negative is not None) and self.success is False:
-           log_test.info('Multicast traffic should is not received because this is negative scenario, hence status is True')
-           self.success = True
-        return self.success
-
-    def send_multicast_data_traffic_from_thread(self, group, intf= 'veth2',source = '1.2.3.4', data_pkt = 50):
-        dst_mac = self.iptomac_convert(group)
-        eth = Ether(dst= dst_mac)
-        ip = IP(dst=group,src=source)
-        data = repr(monotonic.monotonic())
-        log_test.info('Sending %s number of multicast packet to the multicast group %s'%(data_pkt, group))
-        sendp(eth/ip/data,count=data_pkt, iface = intf)
-        pkt = (eth/ip/data)
-        #log_test.info('multicast traffic packet %s'%pkt.show())
-
-    def iptomac_convert(self, mcast_ip):
-        mcast_mac =  '01:00:5e:'
-        octets = mcast_ip.split('.')
-        second_oct = int(octets[1]) & 127
-        third_oct = int(octets[2])
-        fourth_oct = int(octets[3])
-        mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
-        return mcast_mac
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_delay_between_multiple_igmp_joins_and_data_for_multiple_subscribers(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            no_users = 10
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_delay_between_multiple_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip,
-                                                                          source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_delay_between_multiple_igmp_joins_and_data_from_multiple_subscribers_with_low_multicast_data_rate(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            no_users = 10
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_delay_between_multiple_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip,
-                                             source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip, data_pkt = 20)
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self.igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_delay_between_multiple_igmp_joins_and_data_for_same_subscriber(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            no_users = 5
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.0.1'
-            self.netCondition_with_delay_between_multiple_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip,
-                                                                          source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_delay_between_same_igmp_joins_and_data_from_multiple_subscriber(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            no_users = 100
-            group_end_ip = '229.0.0.1'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_delay_between_multiple_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip,
-                                                                          source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_delay_between_multiple_igmp_joins_and_data_from_same_sourcelist_for_multiple_subscriber(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            no_users = 20
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.0.1'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_delay_between_multiple_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip,
-                                                                          source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-
-    def netCondition_with_multiple_scenarios_igmp_joins_and_data(self,users,group_end_ip,source_list_end_ip,user_src_end_ip,bunch_traffic, data_pkt =50,invalid_joins = None):
-        self.setUp_igmp()
-        randomDelay = []
-        groups = []
-        sources = []
-        subscribers_src_ip = []
-        status = []
-        join_threads = []
-        delay_threads = []
-        data_threads = []
-        threads = []
-        subscriber = users
-        count = 1
-        j = 1
-        negative_traffic = None
-        mcastips = self.mcast_ip_range(start_ip = '229.0.0.1',end_ip = group_end_ip)
-        sourceips = self.source_ip_range(start_ip = '10.10.0.1',end_ip = source_list_end_ip)
-        subscriber_sourceips = self.source_ip_range(start_ip = '20.20.0.1',end_ip = user_src_end_ip)
-        while count<=subscriber:
-            group = random.choice(mcastips)
-            source = random.choice(sourceips)
-            subscriber_sourceip = random.choice(subscriber_sourceips)
-            if group in groups:
-                pass
-            else:
-                log_test.info('group = %s source list = %s and subscriber source ip in join = %s'%(group,source, subscriber_sourceip))
-                groups.append(group)
-                sources.append(source)
-                subscribers_src_ip.append(subscriber_sourceip)
-                count += 1
-        self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-        def multiple_joins_send_in_threads(group, source, subscriber_src_ip,invalid_igmp_join,data_pkt = data_pkt):
-            if invalid_igmp_join is None:
-               self.send_igmp_join(groups = [group], src_list = [source],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1, ip_src = [subscriber_src_ip])
-            else:
-               negative_traffic = True
-               self.send_igmp_join_negative(groups = [group], src_list = [source],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                           iface = self.V_INF1, ip_src = [subscriber_src_ip], invalid_igmp_join = invalid_igmp_join)
-            randomDelay_in_thread = randint(10,30)
-            log_test.info('This is running in thread with igmp join sent and delay {}'.format(randomDelay_in_thread))
-            time.sleep(randomDelay_in_thread)
-            log_test.info('This is running in thread with igmp join sent and delay {}'.format(randomDelay_in_thread))
-            status = self.verify_igmp_data_traffic_in_thread(group,intf=self.V_INF1,source=source, data_pkt = data_pkt,negative=negative_traffic)
-            log_test.info('data received for group %s from source %s and status is %s '%(group,source,status))
-            self.igmp_threads_result.append(status)
-        for i in range(subscriber):
-            thread = threading.Thread(target = multiple_joins_send_in_threads, args = (groups[i], sources[i], subscribers_src_ip[i], invalid_joins))
-            if bunch_traffic ==  'yes':
-               if j == 10:
-                  log_test.info('Here we are throttle traffic for 100 sec of delay and agian creating igmp threads')
-                  time.sleep(randint(100,110))
-                  j = 1
-               else:
-                  j = j+ 1
-            time.sleep(randint(1,2))
-            thread.start()
-            threads.append(thread)
-        for thread in threads:
-            thread.join()
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_throttle_between_multiple_igmp_joins_and_data_from_multiple_subscribers(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            batch_traffic_run = 'yes'
-            no_users = 11
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_multiple_scenarios_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip, source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip, bunch_traffic = batch_traffic_run, data_pkt = 50 )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_invalid_igmp_type_multiple_igmp_joins_and_data_from_multiple_subscribers(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            batch_traffic_run = 'no'
-            invalid_igmp_join = 'igmp_type'
-            no_users = 11
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_multiple_scenarios_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip, source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip, bunch_traffic = batch_traffic_run, data_pkt = 50, invalid_joins = invalid_igmp_join )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_invalid_record_type_multiple_igmp_joins_and_data_from_multiple_subscribers(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            batch_traffic_run = 'no'
-            invalid_igmp_join = 'record_type'
-            no_users = 11
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_multiple_scenarios_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip, source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip, bunch_traffic = batch_traffic_run, data_pkt = 50, invalid_joins = invalid_igmp_join )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_with_invalid_ttl_and_multiple_igmp_joins_and_data_from_multiple_subscribers(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        log_test.info('IGMP Thread status before running igmp thread %s '%(self.igmp_threads_result))
-        def netCondition_multiple_igmp_joins_and_data(df):
-            ### Start ips of multicast, source list and subscriber source ip are '229.0.0.1', '10.10.0.1' and '20.20.0.1' respectively
-            batch_traffic_run = 'no'
-            invalid_igmp_join = 'ttl_type'
-            no_users = 11
-            group_end_ip = '229.0.30.254'
-            source_list_end_ip = '10.10.30.254'
-            subscriber_src_end_ip = '20.20.20.254'
-            self.netCondition_with_multiple_scenarios_igmp_joins_and_data(users = no_users, group_end_ip = group_end_ip, source_list_end_ip = source_list_end_ip, user_src_end_ip = subscriber_src_end_ip, bunch_traffic = batch_traffic_run, data_pkt = 10, invalid_joins = invalid_igmp_join )
-            log_test.info('IGMP Thread status after running igmp thread %s '%(self. igmp_threads_result))
-            for i in xrange(no_users):
-               log_test.info('IGMP Thread %s status is %s after running igmp thread '%(i,self.igmp_threads_result[i]))
-               if assert_equal(self.igmp_threads_result[i], True) is True:
-                  df.callback(0)
-            df.callback(0)
-        reactor.callLater(0, netCondition_multiple_igmp_joins_and_data, df)
-        return df
-
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_multiple_eap_tls_sessions_with_out_of_order_exchanges_between_serverHello_and_client_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                tls._eapTlsCertReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsHelloReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsChangeCipherSpec()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsFinished()
-                assert_equal(tls.failTest, True)
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_multiple_eap_tls_session_with_out_of_order_exchanges_in_eapTlsCertReq_packets(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapTlsCertReq()
-                assert_equal(tls.failTest, True)
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                assert_equal(tls.failTest, True)
-                tls._eapTlsCertReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsHelloReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsChangeCipherSpec()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsFinished()
-                assert_equal(tls.failTest, True)
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_multiple_eap_tls_sessions_with_out_of_order_eapTlsChangeCipherSpec_packets(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapTlsChangeCipherSpec()
-                tls.failTest = False
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                assert_equal(tls.failTest, True)
-                tls._eapTlsHelloReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsCertReq()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsChangeCipherSpec()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsFinished()
-                assert_equal(tls.failTest, True)
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_multiple_eap_tls_sessions_dropping_eapTlsHelloReq_packets(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                #tls._eapTlsHelloReq()
-                tls._eapTlsCertReq()
-                tls._eapTlsChangeCipherSpec()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsFinished()
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_multiple_eap_tls_sessions_dropping_eapTlsChangeCipherSpec_packets(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        clients = 100
-        def eap_tls_eapTlsHelloReq_pkt_delay(df):
-           def multiple_tls_random_delay():
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                tls._eapTlsHelloReq()
-                tls._eapTlsCertReq()
-                #tls._eapTlsChangeCipherSpec()
-                assert_equal(tls.failTest, True)
-                tls._eapTlsFinished()
-                log_test.info('Authentication successful for user %d'%i)
-           # Sending multiple tls clients and making random delay in between client and server packets.
-           for i in xrange(clients):
-             thread = threading.Thread(target = multiple_tls_random_delay)
-             time.sleep(randint(1,2))
-             thread.start()
-             threads.append(thread)
-           for thread in threads:
-               thread.join()
-        reactor.callLater(0, eap_tls_eapTlsHelloReq_pkt_delay, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY+50)
-    def test_netCondition_in_eapol_tls_with_invalid_eapol_version_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            EapolPacket.eap_tls_packets_field_value_replace(invalid_field_name= 'eapolTlsVersion')
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            EapolPacket.eap_invalid_tls_packets_info(invalid_field_name= 'eapolTlsVersion', invalid_field_value= 20)
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eapol_tls_with_invalid_eapol_tls_type_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            EapolPacket.eap_tls_packets_field_value_replace(invalid_field_name= 'eapolTlsType')
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            EapolPacket.eap_invalid_tls_packets_info(invalid_field_name= 'eapolTlsType', invalid_field_value= 20)
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eapol_tls_with_invalid_eapol_type_ID_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            EapolPacket.eap_tls_packets_field_value_replace(invalid_field_name= 'eapolTypeID')
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            EapolPacket.eap_invalid_tls_packets_info(invalid_field_name= 'eapolTypeID', invalid_field_value= 20)
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eapol_tls_with_invalid_eapol_response_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        tls = TLSAuthTest()
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            EapolPacket.eap_tls_packets_field_value_replace(invalid_field_name= 'eapolResponse')
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            EapolPacket.eap_invalid_tls_packets_info(invalid_field_name= 'eapolResponse', invalid_field_value= 20)
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_invalid_eap_content_type_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_invalid_content_type_cb():
-            log_test.info('TLS authentication failed with invalid content type in TLSContentType packet')
-        tls = TLSAuthTest(fail_cb = tls_invalid_content_type_cb, invalid_content_type = 44)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_eap_tls_with_invalid_tls_version_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_invalid_eap_tls_version_in_client_auth_packet():
-            log_test.info('TLS authentication failed with invalid tls version field in the packet')
-        tls = TLSAuthTest(fail_cb = tls_invalid_eap_tls_version_in_client_auth_packet, version = 'TLS_2_1')
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_eap_tls_with_invalid_tls_cipher_suite_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_with_invalid_tls_cipher_suite_field_in_client_auth_packet_cb():
-            log_test.info('TLS authentication failed with invalid tls cipher suite field in the packet')
-        tls = TLSAuthTest(fail_cb = tls_with_invalid_tls_cipher_suite_field_in_client_auth_packet_cb, cipher_suite = 'RSA_WITH_AES_512_CBC_SHA')
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_id_mismatch_in_identifier_field_in_client_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_with_invalid_id_in_identifier_response_packet_cb():
-            log_test.info('TLS authentication failed with invalid id in identifier packet')
-        tls = TLSAuthTest(fail_cb = tls_with_invalid_id_in_identifier_response_packet_cb,
-                              id_mismatch_in_identifier_response_packet = True)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_id_mismatch_in_client_hello_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_with_invalid_id_in_client_hello_packet_cb():
-             log_test.info('TLS authentication failed with invalid id in client hello packet')
-        tls = TLSAuthTest(fail_cb = tls_with_invalid_id_in_client_hello_packet_cb,
-                              id_mismatch_in_client_hello_packet = True)
-
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_invalid_client_hello_handshake_type_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_incorrect_handshake_type_client_hello_cb():
-            log_test.info('TLS authentication failed with incorrect handshake type in client hello packet')
-        tls = TLSAuthTest(fail_cb = tls_incorrect_handshake_type_client_hello_cb, invalid_client_hello_handshake_type=True)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_invalid_client_cert_req_handshake_auth_packet(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_incorrect_handshake_type_certificate_request_cb():
-            log_test.info('TLS authentication failed with incorrect handshake type in client certificate request packet')
-        tls = TLSAuthTest(fail_cb = tls_incorrect_handshake_type_certificate_request_cb, invalid_cert_req_handshake = True)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-150)
-    def test_netCondition_in_eap_tls_with_invalid_client_key_ex_replacing_server_key_ex(self):
-        self.setUp_tls()
-        randomDelay = randint(10,300)
-        df = defer.Deferred()
-        def tls_clientkeyex_replace_with_serverkeyex_cb():
-            log_test.info('TLS authentication failed with client key exchange replaced with server key exchange')
-        tls = TLSAuthTest(fail_cb = tls_clientkeyex_replace_with_serverkeyex_cb,clientkeyex_replace_with_serverkeyex=True)
-        def eap_tls_eapTlsHelloReq_pkt_delay():
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            df.callback(0)
-        def eap_tls_verify(df):
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            log_test.info('Holding tlsHelloReq packet for a period of random delay = {} secs'.format(randomDelay))
-            t = Timer(randomDelay, eap_tls_eapTlsHelloReq_pkt_delay)
-            t.start()
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    def tcpreplay_radius_server_packets_from_pcap_file(self, pcap_file_path =None, error_pkt = None):
-        #default radius server packets in path in test/netCondition/xxx.pcap file
-        if pcap_file_path and (error_pkt == None):
-           pcap_file_path = pcap_file_path
-        elif error_pkt == None:
-           pcap_file_path = "/root/test/src/test/netCondition/tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-        elif error_pkt:
-           pcap_file_path = "/root/test/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-        log_test.info('Started replaying pcap file packets on docker0 interface using tcprelay linux command')
-        time.sleep(0.4)
-        sendp(rdpcap(pcap_file_path), iface="eth0", loop=0, inter=1)
-        time.sleep(5)
-        log_test.info('Replayed pcap file packets on docker0 interface')
-
-    def tcpreplay_radius_server_error_packets_from_pcap_file(self, pcap_file_path =None, error_pkt = None):
-        #default radius server packets in path in test/netCondition/xxx.pcap file
-        if pcap_file_path:
-           pcap_file_path = pcap_file_path
-        else:
-           pcap_file_path = "/root/test/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-        log_test.info('Started replaying pcap file error packets on docker0 interface using tcprelay linux command')
-        time.sleep(0.4)
-        sendp(rdpcap(pcap_file_path), iface="eth0", loop=0, inter=1)
-        time.sleep(5)
-        return 'success'
-
-    def emulating_invalid_radius_server_packets_from_pcap_file(self, pcap_file_path =None, pkt_no = None,L2 = None, L3 =None, L4=None, no_of_radius_attribute=None):
-        #default radius server packets in path in test/netCondition/xxx.pcap file
-        random_port = 1222
-        if pcap_file_path:
-           pcap_file_path = pcap_file_path
-        else:
-           pcap_file_path = "/root/test/src/test/netCondition/tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-        log_test.info('Started corrupting tls server packet no = {}'.format(pkt_no))
-        radius_server_pkts = rdpcap(pcap_file_path)
-        error_server_pkt = radius_server_pkts[pkt_no]
-        if pkt_no == 0:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][2].value =  '\n\xd8\xf0\xbbW\xd6$;\xd2s\xd5\xc5Ck\xd5\x01'
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-        if pkt_no == 1:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][2].type =  79
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-
-        if pkt_no == 2:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][1].type =  79
-              error_server_pkt[3][2].len =  18
-              error_server_pkt[3][2].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][3].len =  18
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xff\xeb\x99\xa77\xfc\xa3\xe9'
-
-        if pkt_no == 3:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][1].type =  79
-              error_server_pkt[3][2].len =  18
-              error_server_pkt[3][2].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][3].len =  18
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xff\xeb\x99\xa77\xfc\xa3\xe9'
-
-        if pkt_no == 4:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][1].type =  79
-              error_server_pkt[3][2].len =  18
-              error_server_pkt[3][2].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][3].len =  18
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xff\xeb\x99\xa77\xfc\xa3\xe9'
-
-        if pkt_no == 5:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][1].type =  79
-              error_server_pkt[3][2].len =  18
-              error_server_pkt[3][2].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][3].len =  18
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xff\xeb\x99\xa77\xfc\xa3\xe9'
-
-        if pkt_no == 6:
-           if L4:
-              error_server_pkt[UDP].sport = random_port
-              error_server_pkt[UDP].dport = random_port
-
-           if no_of_radius_attribute:
-              error_server_pkt[3][1].type =  79
-              error_server_pkt[3][2].len =  18
-              error_server_pkt[3][2].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][3].len =  18
-              error_server_pkt[3][3].value =  'R\x1d`#R\x1cm[\xfd\xeb\xb9\xa84\xfc\xa3\xe9'
-              error_server_pkt[3][5].len =  18
-              error_server_pkt[3][5].value =  'R\x1d`#R\x1cm[\xff\xeb\x99\xa77\xfc\xa3\xe9'
-
-
-        error_server_pkt.show()
-        radius_server_pkts[pkt_no] = error_server_pkt
-        wrpcap("/root/test/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap", radius_server_pkts)
-        pcap_file_path = "/root/test/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-
-        log_test.info('Done corrupting tls server packet no = {} send back filepath along with file name'.format(pkt_no))
-        return pcap_file_path
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_emulating_server_packets_without_radius_server_container(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        threads = []
-        threads_status = []
-        clients = 1
-        radius_image = 'cordtest/radius:candidate'
-        delay = 20
-        que = Queue.Queue()
-        def eap_tls_emulating_server_pkts(df):
-           def tls_client_packets(start):
-                time.sleep(0.2)
-                randomDelay = randint(10,300)
-                tls = TLSAuthTest(src_mac = 'random')
-                tls._eapSetup()
-                tls.tlsEventTable.EVT_EAP_SETUP
-                tls._eapStart()
-                tls.tlsEventTable.EVT_EAP_START
-                tls._eapIdReq()
-                tls.tlsEventTable.EVT_EAP_ID_REQ
-                tls._eapTlsHelloReq()
-                tls._eapTlsCertReq()
-                tls._eapTlsChangeCipherSpec()
-                tls._eapTlsFinished()
-                if tls.failTest == False:
-                   log_test.info('Authentication successful for user')
-                   return 'success'
-                else:
-                   log_test.info('Authentication not successful for user')
-                   return 'failed'
-           thread_client = threading.Thread(target=lambda q, arg1: q.put(tls_client_packets(arg1)), args=(que, 'start'))
-           thread_radius = threading.Thread(target = Container.pause_container, args = (radius_image,delay))
-           thread_tcpreplay = threading.Thread(target = self.tcpreplay_radius_server_packets_from_pcap_file)
-           threads.append(thread_radius)
-           threads.append(thread_client)
-           threads.append(thread_tcpreplay)
-           for thread in threads:
-               thread.start()
-           for thread in threads:
-               thread.join()
-           while not que.empty():
-              threads_status = que.get()
-           assert_equal(threads_status, 'success')
-           df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-
-    def eap_tls_emulating_server_pkts_negative_testing(df,msg):
-        threads = []
-        threads_status = []
-        clients = 1
-        radius_image = 'cordtest/radius:candidate'
-        delay = 20
-        que = Queue.Queue()
-        def tls_client_packets(start):
-            time.sleep(0.2)
-            randomDelay = randint(10,300)
-            def tls_invalid_server_packets_scenario_cb():
-                log_test.info('TLS authentication failed with {}'.format(msg))
-            tls = TLSAuthTest(fail_cb = tls_invalid_server_packets_scenario_cb, src_mac = 'random')
-            tls._eapSetup()
-            tls.tlsEventTable.EVT_EAP_SETUP
-            tls._eapStart()
-            tls.tlsEventTable.EVT_EAP_START
-            tls._eapIdReq()
-            tls.tlsEventTable.EVT_EAP_ID_REQ
-            tls._eapTlsHelloReq()
-            tls._eapTlsCertReq()
-            tls._eapTlsChangeCipherSpec()
-            tls._eapTlsFinished()
-            if tls.failTest == True:
-               log_test.info('Authentication not successful for user')
-               return 'failed'
-            else:
-               log_test.info('Authentication successful for user')
-               return 'success'
-        def tcpreplay_radius_server_error_packets_from_pcap_file(pcap_file_path =None, error_pkt = None):
-            #default radius server packets in path in test/netCondition/xxx.pcap file
-            if pcap_file_path:
-               pcap_file_path = pcap_file_path
-            else:
-               pcap_file_path = "/root/test/src/test/netCondition/error_tls_auth_exhange_packets_Radius_server_packets_only.pcap"
-
-            log_test.info('Started replaying pcap file error packets on docker0 interface using tcprelay linux command')
-            time.sleep(0.4)
-            sendp(rdpcap(pcap_file_path), iface="eth0", loop=0, inter=1)
-            time.sleep(5)
-            return 'success'
-        thread_client = threading.Thread(target=lambda q, arg1: q.put(tls_client_packets(arg1)), args=(que, 'start'))
-        thread_radius = threading.Thread(target = Container.pause_container, args = (radius_image,delay))
-        thread_tcpreplay = threading.Thread(target = tcpreplay_radius_server_error_packets_from_pcap_file)
-        threads.append(thread_radius)
-        threads.append(thread_client)
-        threads.append(thread_tcpreplay)
-        for thread in threads:
-            thread.start()
-        for thread in threads:
-            thread.join()
-        while not que.empty():
-            threads_status = que.get()
-        assert_equal(threads_status, 'failed')
-
-
-    @deferred(TEST_TIMEOUT_DELAY-250)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_server_eapid_response_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 0, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping server eapId response')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_server_eapid_response_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 0, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid server eapId response')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_server_hello_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 1, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping server hello packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_server_hello_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 1, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid server hello packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_client_certficate_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 2, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping client certificate access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_client_certficate_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 2, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid client certificate access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_client_certficate_with_2nd_fragment_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 3, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping client certificate with 2nd fragment access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_client_certficate_with_2nd_fragment_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 3, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid client certificate for 2nd fragment access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_client_certficate_with_3rd_fragment_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 4, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping client certificate for 3rd fragment access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_client_certficate_with_3rd_fragment_access_challenge_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 4, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid client certificate for 3rd fragment access challenge packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_cipher_suite_request_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 5, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping cipher suite request server packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_cipher_suite_request_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 5, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid cipher suite request server packet')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_dropped_access_accept_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 6, L4 = True)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'dropping access accept server packet ')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
-
-    @deferred(TEST_TIMEOUT_DELAY-50)
-    def test_netCondition_in_eap_tls_with_valid_client_and_invalid_access_accept_server_packet(self):
-        self.setUp_tls()
-        df = defer.Deferred()
-        self.emulating_invalid_radius_server_packets_from_pcap_file(pkt_no = 6, no_of_radius_attribute = 1)
-        def eap_tls_emulating_server_pkts(df):
-            self.eap_tls_emulating_server_pkts_negative_testing(msg = 'invalid access accept server packet ')
-            df.callback(0)
-        reactor.callLater(0, eap_tls_emulating_server_pkts, df)
-        return df
diff --git a/src/test/netCondition/tls_auth_exhange_packets_Radius_server_packets_only.pcap b/src/test/netCondition/tls_auth_exhange_packets_Radius_server_packets_only.pcap
deleted file mode 100644
index cd700d2..0000000
--- a/src/test/netCondition/tls_auth_exhange_packets_Radius_server_packets_only.pcap
+++ /dev/null
Binary files differ
diff --git a/src/test/onboarding/__init__.py b/src/test/onboarding/__init__.py
deleted file mode 100644
index d370d7c..0000000
--- a/src/test/onboarding/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
-
-#from nose import main as nosetest_main
-#from CordTestConfig import CordTestConfigRestore
-#nosetest_main(addplugins = [ CordTestConfigRestore() ])
diff --git a/src/test/onboarding/onboardingTest.py b/src/test/onboarding/onboardingTest.py
deleted file mode 100644
index 906ff0e..0000000
--- a/src/test/onboarding/onboardingTest.py
+++ /dev/null
@@ -1,1302 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import os,sys
-import keystoneclient.v2_0.client as ksclient
-import keystoneclient.apiclient.exceptions
-import neutronclient.v2_0.client as nclient
-import neutronclient.common.exceptions
-from novaclient import client as nova_client
-from neutronclient.v2_0 import client as neutron_client
-import neutronclient.v2_0.client as neutronclient
-from nose.tools import assert_equal, assert_not_equal
-from twisted.internet import defer
-from nose.twistedtools import reactor, deferred
-from CordTestUtils import *
-from onosclidriver import OnosCliDriver
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnboardingServiceUtils import OnboardingServiceUtils
-from SSHTestAgent import SSHTestAgent
-from CordTestConfig import setup_module, running_on_ciab, teardown_module
-from CordLogger import CordLogger
-from CordTestUtils import *
-from CordTestUtils import log_test as log
-import requests
-import time
-import json
-from VSGAccess import VSGAccess
-log.setLevel('INFO')
-
-class onboarding_exchange(CordLogger):
-    ONOS_INSTANCES = 3
-    V_INF1 = 'veth0'
-    device_id = 'of:' + get_mac()
-    TEST_IP = '8.8.8.8'
-    HOST = "10.1.0.1"
-    USER = "vagrant"
-    PASS = "vagrant"
-    head_node = os.getenv('HEAD_NODE', 'prod')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    on_pod = running_on_pod()
-    vm_name = 'mysite_exampleservice'
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    restApiXos =  None
-    subscriber_account_num = 200
-    subscriber_s_tag = 304
-    subscriber_c_tag = 304
-    subscribers_per_s_tag = 8
-    subscriber_map = {}
-    subscriber_info = []
-    volt_subscriber_info = []
-    restore_methods = []
-    FABRIC_PORT_HEAD_NODE = 1
-    FABRIC_PORT_COMPUTE_NODE = 2
-    APP_NAME = 'org.ciena.xconnect'
-    APP_FILE = os.path.join(test_path, '..', 'apps/xconnect-1.0-SNAPSHOT.oar')
-    NUM_SUBSCRIBERS = 5
-
-    @classmethod
-    def getSubscriberCredentials(cls, subId):
-        """Generate our own account num, s_tag and c_tags"""
-        if subId in cls.subscriber_map:
-            return cls.subscriber_map[subId]
-        account_num = cls.subscriber_account_num
-        cls.subscriber_account_num += 1
-        s_tag, c_tag = cls.subscriber_s_tag, cls.subscriber_c_tag
-        cls.subscriber_c_tag += 1
-        if cls.subscriber_c_tag % cls.subscribers_per_s_tag == 0:
-            cls.subscriber_s_tag += 1
-        cls.subscriber_map[subId] = account_num, s_tag, c_tag
-        return cls.subscriber_map[subId]
-
-    @classmethod
-    def getXosCredentials(cls):
-        onos_cfg = OnosCtrl.get_config()
-        if onos_cfg is None:
-            return None
-        if 'apps' in onos_cfg and \
-           'org.opencord.vtn' in onos_cfg['apps'] and \
-           'cordvtn' in onos_cfg['apps']['org.opencord.vtn'] and \
-           'xos' in onos_cfg['apps']['org.opencord.vtn']['cordvtn']:
-            xos_cfg = onos_cfg['apps']['org.opencord.vtn']['cordvtn']['xos']
-            endpoint = xos_cfg['endpoint']
-            user = xos_cfg['user']
-            password = xos_cfg['password']
-            xos_endpoints = endpoint.split(':')
-            xos_host = xos_endpoints[1][len('//'):]
-            xos_port = xos_endpoints[2][:-1]
-            #log.info('xos_host: %s, port: %s, user: %s, password: %s' %(xos_host, xos_port, user, password))
-            return dict(host = xos_host, port = xos_port, user = user, password = password)
-
-        return None
-    @classmethod
-    def getSubscriberConfig(cls, num_subscribers):
-        features =  {
-            'cdn': True,
-            'uplink_speed': 1000000000,
-            'downlink_speed': 1000000000,
-            'uverse': True,
-            'status': 'enabled'
-        }
-        subscriber_map = []
-        for i in xrange(num_subscribers):
-            subId = 'sub{}'.format(i)
-            account_num, _, _ = cls.getSubscriberCredentials(subId)
-            identity = { 'account_num' : str(account_num),
-                         'name' : 'My House {}'.format(i)
-                         }
-            sub_info = { 'features' : features,
-                         'identity' : identity
-                         }
-            subscriber_map.append(sub_info)
-
-        return subscriber_map
-
-    @classmethod
-    def getVoltSubscriberConfig(cls, num_subscribers):
-        voltSubscriberMap = []
-        for i in xrange(num_subscribers):
-            subId = 'sub{}'.format(i)
-            account_num, s_tag, c_tag = cls.getSubscriberCredentials(subId)
-            voltSubscriberInfo = {}
-            voltSubscriberInfo['voltTenant'] = dict(s_tag = str(s_tag),
-                                                    c_tag = str(c_tag),
-                                                    subscriber = '')
-            voltSubscriberInfo['account_num'] = account_num
-            voltSubscriberMap.append(voltSubscriberInfo)
-
-        return voltSubscriberMap
-
-    @classmethod
-    def setUpClass(cls):
-        OnboardingServiceUtils.setUp()
-        cls.controllers = get_controllers()
-        cls.controller = cls.controllers[0]
-        cls.cli = None
-        cls.on_pod = running_on_pod()
-        cls.on_ciab = running_on_ciab()
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.vcpes = cls.olt.get_vcpes()
-        cls.vcpes_dhcp = cls.olt.get_vcpes_by_type('dhcp')
-        cls.vcpes_reserved = cls.olt.get_vcpes_by_type('reserved')
-        cls.dhcp_vcpes_reserved = [ 'vcpe{}.{}.{}'.format(i, cls.vcpes_reserved[i]['s_tag'], cls.vcpes_reserved[i]['c_tag'])
-                                    for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.untagged_dhcp_vcpes_reserved = [ 'vcpe{}'.format(i) for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.container_vcpes_reserved = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_reserved ]
-        vcpe_dhcp_reserved = None
-        vcpe_container_reserved = None
-        if cls.vcpes_reserved:
-            vcpe_dhcp_reserved = cls.dhcp_vcpes_reserved[0]
-            if cls.on_pod is False:
-                vcpe_dhcp_reserved = cls.untagged_dhcp_vcpes_reserved[0]
-            vcpe_container_reserved = cls.container_vcpes_reserved[0]
-
-        cls.vcpe_dhcp_reserved = vcpe_dhcp_reserved
-        cls.vcpe_container_reserved = vcpe_container_reserved
-        dhcp_vcpe_offset = len(cls.vcpes_reserved)
-        cls.dhcp_vcpes = [ 'vcpe{}.{}.{}'.format(i+dhcp_vcpe_offset, cls.vcpes_dhcp[i]['s_tag'], cls.vcpes_dhcp[i]['c_tag'])
-                           for i in xrange(len(cls.vcpes_dhcp))  ]
-        cls.untagged_dhcp_vcpes = [ 'vcpe{}'.format(i+dhcp_vcpe_offset) for i in xrange(len(cls.vcpes_dhcp)) ]
-        cls.container_vcpes = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_dhcp ]
-        vcpe_dhcp = None
-        vcpe_container = None
-        #cache the first dhcp vcpe in the class for quick testing
-        if cls.vcpes_dhcp:
-            vcpe_container = cls.container_vcpes[0]
-            vcpe_dhcp = cls.dhcp_vcpes[0]
-            if cls.on_pod is False:
-                vcpe_dhcp = cls.untagged_dhcp_vcpes[0]
-        cls.vcpe_container = vcpe_container_reserved or vcpe_container
-        cls.vcpe_dhcp = vcpe_dhcp_reserved or vcpe_dhcp
-        VSGAccess.setUp()
-        cls.setUpCordApi()
-        if cls.on_pod is True:
-            cls.openVCPEAccess(cls.volt_subscriber_info)
-
-    @classmethod
-    def setUpCordApi(cls):
-        our_path = os.path.dirname(os.path.realpath(__file__))
-        cord_api_path = os.path.join(our_path, '..', 'cord-api')
-        framework_path = os.path.join(cord_api_path, 'Framework')
-        utils_path = os.path.join(framework_path, 'utils')
-        data_path = os.path.join(cord_api_path, 'Tests', 'data')
-        subscriber_cfg = os.path.join(data_path, 'Subscriber.json')
-        volt_tenant_cfg = os.path.join(data_path, 'VoltTenant.json')
-        num_subscribers = max(cls.NUM_SUBSCRIBERS, 5)
-        cls.subscriber_info = cls.getSubscriberConfig(num_subscribers)
-        cls.volt_subscriber_info = cls.getVoltSubscriberConfig(num_subscribers)
-
-        sys.path.append(utils_path)
-        sys.path.append(framework_path)
-        from restApi import restApi
-        restApiXos = restApi()
-        xos_credentials = cls.getXosCredentials()
-        if xos_credentials is None:
-            restApiXos.controllerIP = cls.HEAD_NODE
-            restApiXos.controllerPort = '9000'
-        else:
-            restApiXos.controllerIP = xos_credentials['host']
-            restApiXos.controllerPort = xos_credentials['port']
-            restApiXos.user = xos_credentials['user']
-            restApiXos.password = xos_credentials['password']
-        cls.restApiXos = restApiXos
-
-    @classmethod
-    def getVoltId(cls, result, subId):
-        if type(result) is not type([]):
-            return None
-        for tenant in result:
-            if str(tenant['subscriber']) == str(subId):
-                return str(tenant['id'])
-        return None
-
-    @classmethod
-    def closeVCPEAccess(cls, volt_subscriber_info):
-        OnosCtrl.uninstall_app(cls.APP_NAME, onos_ip = cls.HEAD_NODE)
-
-    @classmethod
-    def openVCPEAccess(cls, volt_subscriber_info):
-        """
-        This code is used to configure leaf switch for head node access to compute node over fabric.
-        Care is to be taken to avoid overwriting existing/default vcpe flows.
-        The access is opened for generated subscriber info which should not overlap.
-        We target the fabric onos instance on head node.
-        """
-        OnosCtrl.install_app(cls.APP_FILE, onos_ip = cls.HEAD_NODE)
-        time.sleep(2)
-        s_tags = map(lambda tenant: int(tenant['voltTenant']['s_tag']), volt_subscriber_info)
-        #only get unique vlan tags
-        s_tags = list(set(s_tags))
-        devices = OnosCtrl.get_device_ids(controller = cls.HEAD_NODE)
-        if devices:
-            device_config = {}
-            for device in devices:
-                device_config[device] = []
-                for s_tag in s_tags:
-                    xconnect_config = {'vlan': s_tag, 'ports' : [ cls.FABRIC_PORT_HEAD_NODE, cls.FABRIC_PORT_COMPUTE_NODE ] }
-                    device_config[device].append(xconnect_config)
-
-            cfg = { 'apps' : { 'org.ciena.xconnect' : { 'xconnectTestConfig' : device_config } } }
-            OnosCtrl.config(cfg, controller = cls.HEAD_NODE)
-
-
-    @classmethod
-    def tearDownClass(cls):
-        OnboardingServiceUtils.tearDown()
-        VSGAccess.tearDown()
-        if cls.on_pod is True:
-            cls.closeVCPEAccess(cls.volt_subscriber_info)
-
-    def cliEnter(self,  controller = None):
-        retries = 0
-        while retries < 30:
-            self.cli = OnosCliDriver(controller = controller, connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    def onos_shutdown(self,  controller = None):
-        status = True
-        self.cliEnter(controller = controller)
-        try:
-            self.cli.shutdown(timeout = 10)
-        except:
-            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
-            status = False
-
-        self.cliExit()
-        return status
-
-    def get_exampleservice_vm_public_ip(self, vm_name = 'mysite_exampleservice'):
-	if not vm_name:
-		vm_name = self.vm_name
-	exampleservices = OnboardingServiceUtils.get_exampleservices()
-	for service in exampleservices:
-		if vm_name in service.name:
-			return service.get_public_ip()
-	return None
-
-    def add_static_route_via_vcpe_interface(self, routes, vcpe=None,dhcp_ip=True):
-        if not vcpe:
-            vcpe = self.dhcp_vcpes_reserved[0]
-        if dhcp_ip:
-            os.system('dhclient '+vcpe)
-        time.sleep(1)
-        for route in routes:
-            log.info('route is %s'%route)
-            cmd = 'ip route add ' + route + ' via 192.168.0.1 '+ 'dev ' + vcpe
-            os.system(cmd)
-        return True
-
-    def del_static_route_via_vcpe_interface(self,routes,vcpe=None,dhcp_release=True):
-        if not vcpe:
-            vcpe = self.dhcp_vcpes_reserved[0]
-        cmds = []
-        for route in routes:
-            cmd = 'ip route del ' + route + ' via 192.168.0.1 ' + 'dev ' + vcpe
-            os.system(cmd)
-        if dhcp_release:
-            os.system('dhclient '+vcpe+' -r')
-        return True
-
-    def vsg_for_external_connectivity(self, subscriber_index, reserved = False):
-        if reserved is True:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes_reserved[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes_reserved[subscriber_index]
-        else:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes[subscriber_index]
-        mgmt = 'eth0'
-        host = '8.8.8.8'
-        self.success = False
-        assert_not_equal(vcpe, None)
-        vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        assert_not_equal(vcpe_ip, None)
-        log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        log.info('Sending icmp echo requests to external network 8.8.8.8')
-        st, _ = getstatusoutput('ping -c 3 8.8.8.8')
-        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        assert_equal(st, 0)
-
-    @deferred(50)
-    def test_exampleservice_health(self):
-        """
-        Algo:
-        1. Login to compute node VM
-        2. Get all exampleservice
-        3. Ping to all exampleservice
-        4. Verifying Ping success
-        """
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            status = OnboardingServiceUtils.health_check()
-            assert_equal(status, True)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(50)
-    def test_exampleservice_for_login(self):
-        """
-        Algo:
-        1. Login to compute node VM
-        2. Get all exampleservice
-        3. Login to all exampleservice
-        4. Verifying Login success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            exampleservice_access_status = map(lambda exampleservice: exampleservice.check_access(), exampleservices)
-            status = filter(lambda st: st == False, exampleservice_access_status)
-            assert_equal(len(status), 0)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(30)
-    def test_exampleservice_for_default_route_through_testclient(self):
-        """
-        Algo:
-        1. Login to Head node
-        2. Verify default route exists in test client
-        """
-        if self.on_pod is False:
-           return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-            cmd = "sudo lxc exec testclient -- route | grep default"
-            status, output = ssh_agent.run_cmd(cmd)
-            assert_equal(status, True)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(50)
-    def test_exampleservice_for_service_access_through_testclient(self):
-        """
-        Algo:
-        1. Login to Head node
-        2. Ping to all exampleservice from test client
-        3. Verifying Ping success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-            ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-            cmd = "sudo lxc exec testclient -- ping -c 3 {}".format(vm_public_ip)
-            status, output = ssh_agent.run_cmd(cmd)
-            assert_equal( status, True)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(30)
-    def test_exampleservice_for_service_reachability_from_cord_tester(self):
-        """
-        Algo:
-        1. Add static route to example service running VM IP in cord-tester
-        2. Ping to the VM IP
-        3. Verifying Ping success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-	    vcpe_intf = self.dhcp_vcpes_reserved[0]
-	    try:
-		self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(vm_public_ip))
-        	assert_equal(st, False)
-	    except Exception as error:
-		log.info('Got Unexpected  error %s'%error)
-		raise
-	    finally:
-		self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(40)
-    def test_exampleservice_operational_status_from_testclient(self):
-        """
-        Algo:
-        1. Login to Head node
-        2. Do curl request to the example service running VM IP from test client
-        3. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-            ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-            cmd = 'sudo lxc exec testclient -- apt-get install -y curl'
-            status, _  = ssh_agent.run_cmd(cmd)
-            assert_equal(status, True)
-            #Test connectivity to ExampleService from test client
-            cmd = 'sudo lxc exec testclient -- curl -s http://{}'.format(vm_public_ip)
-            status, _ = ssh_agent.run_cmd(cmd)
-            assert_equal(status, True)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(30)
-    def test_exampleservice_operational_access_from_cord_tester(self):
-        """
-        Algo:
-        1. Add static route to example service running VM IP in cord-tester
-        2. Do curl request to the VM IP
-        3. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-            try:
-	        self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-        	st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-        	assert_not_equal(out,'')
-            except Exception as error:
-                log.info('Got Unexpected  error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(40)
-    def test_exampleservice_for_service_message(self, service_message="\"hello\""):
-        """
-        Algo:
-	1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying Service message in curl response
-        """
-        if self.on_pod is False:
-            return
-	vm_public_ip = self.get_exampleservice_vm_public_ip()
-	vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-	    try:
-	        self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st,out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                output = out.split('\n')
-	        srvs_msg = ''
-                for line in output:
-                    line = line.split(':')
-                    if line[0].strip() == 'Service Message':
-                        srvs_msg = line[1].strip()
-	        assert_equal(service_message, srvs_msg)
-	    except Exception as error:
-	        log.info('Got Unexpected error %s'%error)
-	        raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(40)
-    def test_exampleservice_for_tenant_message(self, tenant_message="\"world\""):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying Tenant message in curl response
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-    	    vcpe_intf = self.dhcp_vcpes_reserved[0]
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st,out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                output = out.split('\n')
-                tnt_msg = ''
-                for line in output:
-                    line = line.split(':')
-                    if line[0].strip() == 'Tenant Message':
-                        tnt_msg = line[1].strip()
-                assert_equal(tenant_message, tnt_msg)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(60)
-    def test_exampleservice_access_after_subscriber_interface_toggle(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-	5. Toggle vcpe interface in cord-tester and do curl request again
-	6. Again verify curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-	    vcpe_intf = self.dhcp_vcpes_reserved[0]
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                #curl request from test container
-                cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st, out = getstatusoutput(cmd)
-                assert_not_equal(out,'')
-                st, _ = getstatusoutput('ifconfig {} down'.format(vcpe_intf))
-                assert_equal(st, False)
-		time.sleep(1)
-                st, _ = getstatusoutput('ifconfig {} up'.format(vcpe_intf))
-		assert_equal(st, False)
-                time.sleep(1)
-	        self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-	        st, out = getstatusoutput(cmd)
-                assert_not_equal(out,'')
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-		getstatusoutput('ifconfig {} up'.format(vcpe_intf))
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-
-    @deferred(60)
-    def test_exampleservice_access_after_service_paused(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Pause example service running VM and do curl request again
-        6. Verify curl response is an empty output
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-	    service_vm = None
-    	    vm_public_ip = self.get_exampleservice_vm_public_ip()
-	    vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-              	   break
-	    assert_not_equal(service_vm,None)
-	    try:
-            	self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            	st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-            	assert_not_equal(out,'')
-		log.info('Pausing example service running vm')
-            	service_vm.pause()
-		time.sleep(2)
-        	st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-        	assert_equal(out,'')
-		service_vm.unpause()
-		time.sleep(3)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		service_vm.unpause()
-		time.sleep(3)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    #Test failing. server state goes to error after resuming
-    @deferred(60)
-    def test_exampleservice_access_after_service_is_suspended(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Suspend example service running VM and do curl request again
-        6. Verify curl response is an empty output
-	7. Resume suspended VM and do curl request now
-	8. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-                   break
-            assert_not_equal(service_vm,None)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                log.info('Suspending example service running vm')
-                service_vm.suspend()
-                time.sleep(5)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_equal(out,'')
-                service_vm.resume()
-		time.sleep(5)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                service_vm.stop()
-		time.sleep(1)
-		service_vm.start()
-		time.sleep(5)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(60)
-    def test_exampleservice_access_after_service_restart(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Restart example service running VM and do curl request again
-        9. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-                   break
-            assert_not_equal(service_vm,None)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-		assert_not_equal(out,'')
-                log.info('Restarting example service running vm')
-                service_vm.reboot()
-		time.sleep(5)
-		clock = 0
-		status = False
-		while(clock <= 30):
-		    time.sleep(5)
-                    st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-		    if out != '':
-			status = True
-			break
-		    clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    #not test. vSG VM goes down after restart
-    @deferred(70)
-    def test_exampleservice_access_after_vsg_vm_restart(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Restart vSG VM and do curl request again
-        9. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved [0]
-	    vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                log.info('Restarting vSG VM')
-                vsg.reboot()
-                time.sleep(5)
-                clock = 0
-                status = False
-                while(clock <= 40):
-                    time.sleep(5)
-                    st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                    if out != '':
-                        status = True
-                        break
-                    clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(80)
-    def test_exampleservice_access_after_service_stop(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Stop example service running VM and do curl request again
-        6. Verify curl response is an empty output
-        7. Start stopped VM and do curl request now
-        8. Verifying curl request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-                   break
-            assert_not_equal(service_vm,None)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                log.info('Stopping example service running vm')
-                service_vm.stop()
-                time.sleep(5)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-		assert_equal(out,'')
-                service_vm.start()
-		time.sleep(5)
-                clock = 0
-                status = False
-                while(clock <= 60):
-                    time.sleep(5)
-                    st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                    if out != '':
-                        status = True
-                        break
-                    clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		service_vm.start()
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(80)
-    def test_exampleservice_for_service_message_after_service_stop_and_start(self, service_message="\"hello\""):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Stop example service running VM and do curl request again
-        6. Verify curl response is an empty output
-        7. Start stopped VM and do curl request now
-        8. Verifying Service message in curl response
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-	def test_exampleservice(df):
-	    service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-                   break
-            assert_not_equal(service_vm,None)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st,out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-	        log.info('Stopping example service running VM')
-                service_vm.stop()
-                time.sleep(5)
-                st, out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_equal(out,'')
-                service.start()
-	        time.sleep(5)
-		clock = 0
-		while(clock <= 60):
-		    time.sleep(5)
-                    st,out = getstatusoutput('curl -s http://{} --max-time 10'.format(vm_public_ip))
-		    if out != '':
-                	output = out.split('\n')
-                	srvs_msg = None
-                	for line in output:
-                    	    line = line.split(':')
-                    	    if line[0].strip() == 'Service Message':
-                        	srvs_msg = line[1].strip()
-				clock = 60
-				break
-		    clock += 5
-                assert_equal(service_message, srvs_msg)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                service_vm.start()
-		time.sleep(5)
-	    finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(80)
-    def test_exampleservice_for_tenant_message_after_service_restart(self,tenant_message="\"world\""):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Restart example service running VM and do curl request again
-        6. Verifying Tenant message in curl response
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            service_vm = None
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            exampleservices = OnboardingServiceUtils.get_exampleservices()
-            for service in exampleservices:
-                if self.vm_name in service.name:
-                   service_vm = service
-                   break
-            assert_not_equal(service_vm,None)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st,out = getstatusoutput('curl -s http://{} --max-time 5'.format(vm_public_ip))
-                assert_not_equal(out,'')
-                log.info('Restarting example service running VM')
-                service_vm.reboot()
-                time.sleep(5)
-                clock = 0
-                while(clock <= 40):
-                    time.sleep(5)
-                    st,out = getstatusoutput('curl -s http://{} --max-time 10'.format(vm_public_ip))
-                    if out != '':
-                        output = out.split('\n')
-                        tnnt_msg = None
-                        for line in output:
-                            line = line.split(':')
-                            if line[0].strip() == 'Tenant Message':
-                                tnnt_msg = line[1].strip()
-                                clock = 40
-                                break
-                    clock += 5
-                assert_equal(tenant_message, tnnt_msg)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(50)
-    def test_exampleservice_access_after_vcpe_instance_restart(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Restart vcpe instance and do curl request again
-        8. Verifying curl  request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved[0]
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                #curl request from test container
-                curl_cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st, out = getstatusoutput(curl_cmd)
-		assert_not_equal(out,'')
-                #restarting example service VM
-                cmd = 'sudo docker restart {}'.format(vcpe_name)
-                status, _ = vsg.run_cmd(cmd)
-                assert_equal(status, True)
-                time.sleep(5)
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                clock = 0
-                status = False
-                while(clock <= 30):
-                    time.sleep(5)
-                    st, out = getstatusoutput(curl_cmd)
-                    if out != '':
-                        status = True
-                        break
-                    clock += 5
-                assert_equal(status,True)
-            except Exception as error:
-                log.info('Got Unexpeted error %s'%error)
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(50)
-    def test_exampleservice_access_after_vcpe_instance_wan_interface_toggle(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Restart vcpe instance and do curl request again
-        8. Verifying curl  request success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved[0]
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            wan_intf = 'eth0'
-            mgmt = 'eth0'
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                #curl request from test container
-                curl_cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st, out = getstatusoutput(curl_cmd)
-                assert_not_equal(out,'')
-                st = VSGAccess.vcpe_wan_down(vcpe_name)
-                if st is False:
-                        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe_intf)
-                assert_not_equal(st, '0')
-                time.sleep(2)
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                curl_cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st, out = getstatusoutput(curl_cmd)
-                assert_equal(out,'')
-                st = VSGAccess.vcpe_wan_up(vcpe_name)
-                if st is False:
-                        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe_intf)
-                assert_not_equal(st, '0')
-                time.sleep(5)
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                st, out = getstatusoutput(curl_cmd)
-                assert_not_equal(out,'')
-            except Exception as error:
-                log.info('Got Unexpeted error %s'%error)
-                vsg.run_cmd('sudo docker restart {}'.format(vcpe_name,wan_intf))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(30)
-    def test_exampleservice_access_after_firewall_rule_added_to_drop_service_running_server_in_vcpe_instance(self):
-        """
-        Algo:
-        1. Get dhcp ip to vcpe interface in cord-tester
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Add a firewall rule in vcpe instance to drop packets destined to example service VM
-        6. Do curl request now
-        7. Verifying curl response is an empty output
-	8. Delete the firewall rule and do curl request again
-	9. Verifying curl request success
-        """
-        df = defer.Deferred()
-        def test_exampleservice(df,vcpe_intf=vcpe_intf,vcpe_name=vcpe_name):
-            vcpe_intf = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved[0]
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                #curl request from test container
-                curl_cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st, out = getstatusoutput(curl_cmd)
-                assert_not_equal(out,'')
-                #restarting example service VM
-                cmd = 'sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,vm_public_ip)
-                status, _ = vsg.run_cmd(cmd)
-                assert_equal(status, True)
-                time.sleep(1)
-                st, out = getstatusoutput(curl_cmd)
-                assert_equal(out,'')
-            except Exception as error:
-                log.info('Got Unexpeted error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,vm_public_ip))
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-
-    def vsg_xos_subscriber_create(self, index, subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return ''
-        if subscriber_info is None:
-            subscriber_info = self.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        vcpe = 'vcpe-{}-{}'.format(s_tag, c_tag)
-        log.info('Creating tenant with s_tag: %d, c_tag: %d' %(s_tag, c_tag))
-        subId = ''
-        try:
-            result = self.restApiXos.ApiPost('TENANT_SUBSCRIBER', subscriber_info)
-            assert_equal(result, True)
-            result = self.restApiXos.ApiGet('TENANT_SUBSCRIBER')
-            assert_not_equal(result, None)
-            subId = self.restApiXos.getSubscriberId(result, volt_subscriber_info['account_num'])
-            assert_not_equal(subId, '0')
-            log.info('Subscriber ID for account num %s = %s' %(str(volt_subscriber_info['account_num']), subId))
-            volt_tenant = volt_subscriber_info['voltTenant']
-            #update the subscriber id in the tenant info before making the rest
-            volt_tenant['subscriber'] = subId
-            result = self.restApiXos.ApiPost('TENANT_VOLT', volt_tenant)
-            assert_equal(result, True)
-            #if the vsg instance was already instantiated, then reduce delay
-            if c_tag % self.subscribers_per_s_tag == 0:
-                delay = 350
-            else:
-                delay = 90
-            log.info('Delaying %d seconds for the VCPE to be provisioned' %(delay))
-            time.sleep(delay)
-            log.info('Testing for external connectivity to VCPE %s' %(vcpe))
-            self.vsg_for_external_connectivity(index)
-        finally:
-            return subId
-
-    def vsg_xos_subscriber_delete(self, index, subId = '', voltId = '', subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return
-        if subscriber_info is None:
-            subscriber_info = self.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        vcpe = 'vcpe-{}-{}'.format(s_tag, c_tag)
-        log.info('Deleting tenant with s_tag: %d, c_tag: %d' %(s_tag, c_tag))
-        if not subId:
-            #get the subscriber id first
-            result = self.restApiXos.ApiGet('TENANT_SUBSCRIBER')
-            assert_not_equal(result, None)
-            subId = self.restApiXos.getSubscriberId(result, volt_subscriber_info['account_num'])
-            assert_not_equal(subId, '0')
-        if not voltId:
-            #get the volt id for the subscriber
-            result = self.restApiXos.ApiGet('TENANT_VOLT')
-            assert_not_equal(result, None)
-            voltId = self.getVoltId(result, subId)
-            assert_not_equal(voltId, None)
-        log.info('Deleting subscriber ID %s for account num %s' %(subId, str(volt_subscriber_info['account_num'])))
-        status = self.restApiXos.ApiDelete('TENANT_SUBSCRIBER', subId)
-        assert_equal(status, True)
-        #Delete the tenant
-        log.info('Deleting VOLT Tenant ID %s for subscriber %s' %(voltId, subId))
-        self.restApiXos.ApiDelete('TENANT_VOLT', voltId)
-
-    def vsg_xos_subscriber_id(self, index):
-        log.info('index and its type are %s, %s'%(index, type(index)))
-        volt_subscriber_info = self.volt_subscriber_info[index]
-        result = self.restApiXos.ApiGet('TENANT_SUBSCRIBER')
-        assert_not_equal(result, None)
-        subId = self.restApiXos.getSubscriberId(result, volt_subscriber_info['account_num'])
-        return subId
-
-    @deferred(500)
-    def test_xos_subcriber_access_exampleservice(self,index=0):
-        """
-        Algo:
-        1. Create two vcpe instances using XOS
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Repeat steps for both vcpes
-        """
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf = self.dhcp_vcpes[0]
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            try:
-                self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                time.sleep(1)
-                cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                st,out = getstatusoutput(cmd)
-                assert_not_equal(out,'')
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-	    finally:
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf)
-                self.vsg_xos_subscriber_delete(index, subId = subId)
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
-
-    @deferred(500)
-    def test_exampleservice_multiple_subcribers_access_same_service(self,index1=0,index2=1):
-        """
-        Algo:
-        1. Create two vcpe instances using XOS
-        2. Add static route to example service running VM IP in cord-tester
-        3. Do curl request to the VM IP
-        4. Verifying curl request success
-        5. Repeat steps for both vcpes
-        """
-        df = defer.Deferred()
-        def test_exampleservice(df):
-            vm_public_ip = self.get_exampleservice_vm_public_ip()
-            vcpe_intf1 = self.dhcp_vcpes[0]
-            vcpe_intf2 = self.dhcp_vcpes[1]
-            subId1 = self.vsg_xos_subscriber_id(index1)
-            if subId1 == '0':
-                subId1 = self.vsg_xos_subscriber_create(index1)
-            assert_not_equal(subId1,'0')
-            subId2 = self.vsg_xos_subscriber_id(index2)
-            if subId2 == '0':
-                subId2 = self.vsg_xos_subscriber_create(index2)
-            assert_not_equal(subId2,'0')
-            try:
-                for vcpe in [vcpe_intf1,vcpe_intf2]:
-                    self.add_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf1)
-                    time.sleep(1)
-                    status = False
-                    cmd = 'curl -s http://{} --max-time 5'.format(vm_public_ip)
-                    st,out = getstatusoutput(cmd)
-                    assert_not_equal(out,'')
-                    self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf2)
-                    time.sleep(1)
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf1)
-                self.del_static_route_via_vcpe_interface([vm_public_ip],vcpe=vcpe_intf2)
-                self.vsg_xos_subscriber_delete(index1, subId = subId1)
-                self.vsg_xos_subscriber_delete(index2, subId = subId2)
-                raise
-            df.callback(0)
-        reactor.callLater(0,test_exampleservice,df)
-        return df
diff --git a/src/test/onosCli/__init__.py b/src/test/onosCli/__init__.py
deleted file mode 100644
index 52b3c4b..0000000
--- a/src/test/onosCli/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
diff --git a/src/test/onosCli/onosCliTest.py b/src/test/onosCli/onosCliTest.py
deleted file mode 100644
index 46d5eff..0000000
--- a/src/test/onosCli/onosCliTest.py
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import time
-import os
-import json
-from nose.tools import *
-from onosclidriver import OnosCliDriver
-from OnosCtrl import OnosCtrl
-from CordTestUtils import log_test as log
-
-log.setLevel('INFO')
-
-class routes_exchange(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.cli = OnosCliDriver(connect = True)
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.cli.disconnect()
-
-    def test_route_cli(self):
-        routes = json.loads(self.cli.routes(jsonFormat = True))
-        log.info('Routes: %s' %routes)
-
-    def test_devices_cli(self):
-        devices = json.loads(self.cli.devices(jsonFormat = True))
-        available_devices = filter(lambda d: d['available'], devices)
-        device_ids = [ d['id'] for d in devices ]
-        log.info('Available Devices: %s' %devices)
-        log.info('Device IDS: %s' %device_ids)
-
-    def test_flows_cli(self):
-        flows = json.loads(self.cli.flows(jsonFormat = True))
-        flows = filter(lambda f: f['flows'], flows)
-        log.info('Flows: %s' %flows)
diff --git a/src/test/pap/__init__.py b/src/test/pap/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/pap/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/pap/papTest.py b/src/test/pap/papTest.py
deleted file mode 100644
index 59c8a0e..0000000
--- a/src/test/pap/papTest.py
+++ /dev/null
@@ -1,45 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import os,sys
-from nose.tools import assert_equal
-from EapPAP import PAPAuthTest
-
-class eap_auth_exchange(unittest.TestCase):
-      def test_eap_pap(self):
-          pap = PAPAuthTest()
-          pap.runTest()
-
-if __name__ == '__main__':
-    t = PAPAuthTest()
-    t.runTest()
-    
diff --git a/src/test/perf/pktgen_dpdk.sh b/src/test/perf/pktgen_dpdk.sh
deleted file mode 100644
index a9ca83d..0000000
--- a/src/test/perf/pktgen_dpdk.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-
-#Get system info
-name=`uname -n`
-
-#Export env variables to inherit
-export MY_DIR=$(pwd)
-export DPDK_TAG=v16.11
-export OVS_TAG=branch-2.7
-
-#Clone repo and build DPDK
-git clone http://dpdk.org/git/dpdk -b $DPDK_TAG $MY_DIR/dpdk
-cd $MY_DIR/dpdk
-export RTE_SDK=$(pwd)
-export RTE_TARGET=x86_64-native-linuxapp-gcc
-make -j4 install T=$RTE_TARGET
-
-#Set for kernel mod
-sudo apt-get install libpcap-dev
-sudo apt-get install linux-headers-`uname -r`
-
-#Hugepage allocation
-sudo -E sysctl -w vm.nr_hugepages=1024
-sudo -E umount /dev/hugepages
-sudo -E mkdir -p /dev/hugepages
-sudo -E mount -t hugetlbfs -o pagesize=2048k none /dev/hugepages
-
-#Traffic generator powered by DPDK
-wget http://fast.dpdk.org/rel/dpdk-16.11.1.tar.xz
-wget http://dpdk.org/browse/apps/pktgen-dpdk/snapshot/pktgen-dpdk-pktgen-3.1.2.tar.gz
-tar -xf dpdk-16.11.1.tar.xz
-tar -xf pktgen-dpdk-pktgen-3.1.2.tar.gz
-
-#Export env vars and make
-export RTE_SDK=/home/ubuntu/dpdk-stable-16.11.1
-export PKTGEN=/home/ubuntu/pktgen-dpdk-pktgen-3.1.2
-export PKTGEN=/home/ubuntu/pktgen-dpdk-pktgen-3.1.2
-export RTE_TARGET=x86_64-native-linuxapp-gcc
-cd $RTE_SDK
-make install T=x86_64-native-linuxapp-gcc
-cd $PKTGEN
-make
-
-#Loads the UIO support module
-if lsmod | grep "uio" &> /dev/null ; then
-echo "uio module is loaded"
-else
-modprobe uio
-fi
-
-#Loading the the igb-uio.ko module
-if lsmod | grep "igb_uio" &> /dev/null ; then
-echo "igb_uio module is loaded"
-else
-insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-fi
-
-iface=`python $RTE_SDK/tools/dpdk-devbind.py -s | awk '/ens4/ {print $1}'`
-python $RTE_SDK/tools/dpdk-devbind.py -b igb_uio $iface
-
-# RUN Pktgen
-# -c COREMASK (0x3ff) (1111111111) 10 cores used with first core used for pktgen,
-# -n Memory channels, -socket memory for each cpu, -m for memory allocation
-# -P Promiscuous mode for all ports
-./app/x86_64-native-linuxapp-gcc/pktgen -c 0x3ff -n 2 --proc-type auto --socket-mem 4096 -- -T -P -m "[2-5:6-9].0"
diff --git a/src/test/proxyarp/__init__.py b/src/test/proxyarp/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/proxyarp/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/proxyarp/proxyarpTest.py b/src/test/proxyarp/proxyarpTest.py
deleted file mode 100644
index 015ab7b..0000000
--- a/src/test/proxyarp/proxyarpTest.py
+++ /dev/null
@@ -1,470 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from scapy.all import *
-from CordTestUtils import get_mac, log_test
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnosFlowCtrl import OnosFlowCtrl
-from onosclidriver import OnosCliDriver
-from CordContainer import Container, Onos, Quagga
-from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart
-from portmaps import g_subscriber_port_map
-import threading
-from threading import current_thread
-import time
-import os
-import json
-log_test.setLevel('INFO')
-
-
-class proxyarp_exchange(unittest.TestCase):
-
-    apps = ('org.onosproject.vrouter','org.onosproject.proxyarp')
-    device_id = 'of:' + get_mac()
-    device_dict = { "devices" : {
-                "{}".format(device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    MAX_PORTS = 100
-    hosts_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        cls.load_device_id()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the vrouter apps'''
-        #cls.vrouter_host_unload()
-
-    @classmethod
-    def load_device_id(cls):
-        did = OnosCtrl.get_device_id()
-        cls.device_id = did
-        cls.device_dict = { "devices" : {
-                "{}".format(did) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-            },
-        }
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def proxyarp_host_unload(cls):
-        index = 1
-        for host,_ in cls.hosts_list:
-            iface = cls.port_map[index]
-            index += 1
-            config_cmds = ('ifconfig {} 0'.format(iface), )
-            for cmd in config_cmds:
-		log_test.info('host unload command %s' % cmd)
-                os.system(cmd)
-
-    @classmethod
-    def interface_config_load(cls, interface_cfg = None):
-  	if type(interface_cfg) is tuple:
-            res = []
-            for v in interface_cfg:
-		if type(v) == list:
-		    pass
-		else:
-                    res += v.items()
-                    config = dict(res)
-        else:
-            config = interface_cfg
-        cfg = json.dumps(config)
-        with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
-            f.write(cfg)
-        return cord_test_onos_restart()
-
-    @classmethod
-    def host_config_load(cls, host_config = None):
-	for host in host_config:
-	    status, code = OnosCtrl.host_config(host)
-	    if status is False:
-                log_test.info('JSON request returned status %d' %code)
-                assert_equal(status, True)
-
-    @classmethod
-    def generate_interface_config(cls, hosts = 1):
-        num = 0
-        start_host = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
-        end_host =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        hosts_list = []
-        for n in xrange(start_host, end_host, 256):
-            port_map = ports_dict['ports']
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 1
-            host_ip = n + 2
-            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
-            host = '%d.%d.%d.%d' % ( (host_ip >> 24) & 0xff, ( ( host_ip >> 16) & 0xff ), ( (host_ip >> 8 ) & 0xff ), host_ip & 0xff )
-            mac = RandMAC()._fix()
-            hosts_list.append((host, mac))
-            if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == hosts:
-                break
-        cls.hosts_list = hosts_list
-        return (cls.device_dict, ports_dict, hosts_list)
-
-    @classmethod
-    def generate_host_config(cls):
-        num = 0
-        hosts_dict = {}
-        for host, mac in cls.hosts_list:
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-	    hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': port}}
-            num += 1
-        return hosts_dict.values()
-
-    @classmethod
-    def proxyarp_activate(cls, deactivate = False):
-        app = 'org.onosproject.proxyarp'
-        onos_ctrl = OnosCtrl(app)
-        if deactivate is True:
-            onos_ctrl.deactivate()
-        else:
-            onos_ctrl.activate()
-        time.sleep(3)
-
-    @classmethod
-    def proxyarp_config(cls, hosts = 1):
-        proxyarp_configs = cls.generate_interface_config(hosts = hosts)
-	cls.interface_config_load(interface_cfg = proxyarp_configs)
-	hostcfg = cls.generate_host_config()
-	cls.host_config_load(host_config = hostcfg)
-        return proxyarp_configs
-
-    def proxyarp_arpreply_verify(self, ingress, hostip, hostmac, PositiveTest=True):
-	log_test.info('verifying arp reply for host ip %s host mac %s on interface %s'%(hostip ,hostmac ,self.port_map[ingress]))
-	self.success = False
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                self.success = True if PositiveTest == True else False
-            sniff(count=1, timeout=2, lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
-                  prn = recv_cb, iface = self.port_map[ingress])
-        t = threading.Thread(target = recv_task)
-        t.start()
-        pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst=hostip))
-        log_test.info('sending arp request  for dest ip %s on interface %s' %
-                 (hostip, self.port_map[ingress]))
-        sendp( pkt, count = 10, iface = self.port_map[ingress])
-        t.join()
-	if PositiveTest:
-            assert_equal(self.success, True)
-	else:
-	    assert_equal(self.success, False)
-
-    def __proxyarp_hosts_verify(self, hosts = 1,PositiveTest = True):
-        _,_,hosts_config = self.proxyarp_config(hosts = hosts)
-	log_test.info('\nhosts_config %s and its type %s'%(hosts_config,type(hosts_config)))
-        self.cliEnter()
-        connected_hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log_test.info('Discovered hosts: %s' %connected_hosts)
-        #We read from cli if we expect less number of routes to avoid cli timeouts
-        if hosts <= 10000:
-            assert_equal(len(connected_hosts), hosts)
-	ingress = hosts+1
-	for hostip, hostmac in hosts_config:
-	        self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = PositiveTest)
-		time.sleep(1)
-	self.cliExit()
-        return True
-
-    def test_proxyarp_with_1_host(self, hosts=1):
-        res = self.__proxyarp_hosts_verify(hosts = hosts)
-        assert_equal(res, True)
-	#cls.proxyarp_host_unload()
-    def test_proxyarp_with_10_hosts(self, hosts=10):
-        res = self.__proxyarp_hosts_verify(hosts = hosts)
-        assert_equal(res, True)
-    def test_proxyarp_with_50_hosts(self, hosts=50):
-        res = self.__proxyarp_hosts_verify(hosts = hosts)
-        assert_equal(res, True)
-    def test_proxyarp_app_with_disabling_and_re_enabling(self,hosts = 3):
-	ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
-	ingress = hosts+1
-	for hostip, hostmac in hosts_config:
-	    self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-	    time.sleep(1)
-	log_test.info('Deactivating proxyarp  app and expecting not to get arp reply from ONOS')
-	self.proxyarp_activate(deactivate = True)
-	for hostip, hostmac in hosts_config:
-	    self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
-	    time.sleep(1)
-	log_test.info('activating proxyarp  app and expecting to get arp reply from ONOS')
-	self.proxyarp_activate(deactivate = False)
-	for hostip, hostmac in hosts_config:
-            self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-
-    def test_proxyarp_nonexisting_host(self,hosts = 1):
-    	_,_,hosts_config = self.proxyarp_config(hosts = hosts)
-	ingress = hosts + 2
-	for host, mac in hosts_config:
-	    self.proxyarp_arpreply_verify(ingress,host,mac,PositiveTest = True)
-	new_host = hosts_config[-1][0].split('.')
-	new_host[2] = str(int(new_host[2])+1)
-	new_host = '.'.join(new_host)
-	new_mac =  RandMAC()._fix()
-	log_test.info('verifying arp reply for host ip %s on interface %s'%(new_host,self.port_map[ingress]))
-	res=srp1(Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst=new_host),timeout=2,iface=self.port_map[ingress])
-	assert_equal(res, None)
-	log_test.info('arp reply not seen for host ip %s on interface %s as expected'%(new_host,self.port_map[ingress]))
-	hosts = hosts + 1
-	_,_,hosts_config = self.proxyarp_config(hosts = hosts)
-	for host in hosts_config:
-	    if host[0] == new_host:
-		new_mac = host[1]
-	self.proxyarp_arpreply_verify(ingress,new_host,new_mac,PositiveTest = True)
-
-    def test_proxyarp_removing_host(self,hosts = 3):
-        ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
-        ingress = hosts+1
-        for hostip, hostmac in hosts_config:
-            self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
-            time.sleep(1)
-	host_mac = hosts_config[0][1]
-        log_test.info('removing host entry %s' % host_mac)
-        self.cliEnter()
-        hostentries = json.loads(self.cli.hosts(jsonFormat = True))
-        for host in hostentries:
-	    res = host_mac.upper() in host.values()
-	    if res:
-	 	break
-	assert_equal(res, True)
-        hostid = host_mac+'/'+'None'
-        delete_host  = self.cli.host_remove(hostid)
-        hostentries = json.loads(self.cli.hosts(jsonFormat = True))
-	for host in hostentries:
-            res = host_mac.upper() in host.values()
-            if res:
-                break
-        assert_equal(res, False)
-        self.proxyarp_arpreply_verify(ingress,hosts_config[0][0],host_mac,PositiveTest = False)
-        time.sleep(1)
-        self.cliExit()
-
-    def test_proxyarp_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts = 10):
-	ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
-	self.success = True
-	ingress = hosts+1
-	ports = range(ingress,ingress+10)
-	hostmac = []
-	hostip = []
-	for ip,mac in hosts_config:
-	    hostmac.append(mac)
-	    hostip.append(ip)
-	success_dir = {}
-	def verify_proxyarp(*r):
-            ingress,hostmac,hostip = r[0],r[1],r[2]
-            def mac_recv_task():
-                def recv_cb(pkt):
-		    log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                    success_dir[current_thread().name] = True
-		sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
-                    prn = recv_cb, iface = self.port_map[ingress])
-	    t = threading.Thread(target = mac_recv_task)
-	    t.start()
-	    pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
-            log_test.info('sending arp request  for dest ip %s on interface %s' %
-                 (hostip,self.port_map[ingress]))
-            sendp(pkt, count = 10,iface = self.port_map[ingress])
-            t.join()
-	t = []
-	for i in range(10):
-	    t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-	    t[i].start()
-	for i in range(10):
-            t[i].join()
-        if len(success_dir) != 10:
-                self.success = False
-        assert_equal(self.success, True)
-
-    def test_proxyarp_disabling_enabling_app_initiating_concurrent_requests(self,hosts = 10):
-	'''Test sending arp requests to multiple host ips at once from different interfaces by disabling and re-enabling proxyarp app'''
-        ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
-        self.success = True
-        ingress = hosts+1
-        ports = range(ingress,ingress+10)
-        hostmac = []
-        hostip = []
-        for ip,mac in hosts_config:
-            hostmac.append(mac)
-            hostip.append(ip)
-        success_dir = {}
-        def verify_proxyarp(*r):
-            ingress,hostmac,hostip = r[0],r[1],r[2]
-            def mac_recv_task():
-                def recv_cb(pkt):
-                    log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                    success_dir[current_thread().name] = True
-                sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
-                    prn = recv_cb, iface = self.port_map[ingress])
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
-            log_test.info('sending arp request  for dest ip %s on interface %s' %
-                 (hostip,self.port_map[ingress]))
-            sendp(pkt, count = 10,iface = self.port_map[ingress])
-            t.join()
-        t1 = []
-	#starting multi threading before proxyarp disable
-        for i in range(10):
-            t1.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-            t1[i].start()
-        for i in range(10):
-            t1[i].join()
-        if len(success_dir) != 10:
-                self.success = False
-        assert_equal(self.success, True)
-	self.proxyarp_activate(deactivate = True)
-	#starting multi threading after proxyarp disable
-	t2 = []
-	self.success = False
-	for i in range(10):
-            t2.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-            t2[i].start()
-        for i in range(10):
-            t2[i].join()
-        if len(success_dir) != 10:
-                self.success = True
-        assert_equal(self.success, False)
-	self.proxyarp_activate(deactivate = False)
-	#starting multi threading after proxyarp re-enable
-	self.success = True
-	t3 = []
-	for i in range(10):
-            t3.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-            t3[i].start()
-        for i in range(10):
-            t3[i].join()
-        if len(success_dir) != 20:
-                self.success = False
-	assert_equal(self.success, True)
-
-    def test_proxyarp_with_existing_and_non_existing_hostIPs_initiating_concurrent_requests(self,hosts = 5):
-        ports_map, egress_map,hosts_config = self.proxyarp_config(hosts = hosts)
-        self.success = True
-        ingress = hosts+1
-        ports = range(ingress,ingress+10)
-        hostmac = []
-        hostip = []
-        for ip,mac in hosts_config:
-            hostmac.append(mac)
-            hostip.append(ip)
-	#adding 5 non-existing host IPs to hostip list
-	for i in range(1,6):
-	    ip = hostip[-1].split('.')
-	    ip[3] = str(int(ip[3])+int(i))
-            ip = '.'.join(ip)
-	    hostip.append(ip)
-	    hostmac.append(RandMAC()._fix())
-        success_dir = {}
-	replied_hosts = []
-        def verify_proxyarp(*r):
-            ingress,hostmac,hostip = r[0],r[1],r[2]
-            def mac_recv_task():
-                def recv_cb(pkt):
-                    log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                    success_dir[current_thread().name] = True
-		    replied_hosts.append(hostip)
-                sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].psrc == hostip,
-                    prn = recv_cb, iface = self.port_map[ingress])
-            t = threading.Thread(target = mac_recv_task)
-            t.start()
-            pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
-            log_test.info('sending arp request  for dest ip %s on interface %s' %
-                 (hostip,self.port_map[ingress]))
-            sendp(pkt, count = 10,iface = self.port_map[ingress])
-            t.join()
-        t = []
-        for i in range(10):
-            t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
-        for i in range(10):
-            t[i].start()
-        for i in range(10):
-            t[i].join()
-        if len(success_dir) != 5 and len(replied_hosts) != 5:
-                self.success = False
-        assert_equal(self.success, True)
-	for i in range(5):
-	    if hostip[i] not in replied_hosts:
-		self.success = False
-	assert_equal(self.success, True)
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_reboot b/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_reboot
deleted file mode 100644
index d5745f5..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_reboot
+++ /dev/null
@@ -1,216 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchang  e.test_dhcpl2relay_after_server_reboot --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 -  -olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHO  T.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [12/Oct/2017 05:10:03] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_reboot
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_reb  oot
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHO  T.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_reboot'] tests across 1 containers   in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_  server_reboot']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_reboot']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_after_server_reboot (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47  .64]:8101 found: line 1 type RSA
-DHCP server running on remote host and list of service commands are
- ['usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd.conf -lf /root/test/src/test/setup/dhcpd.l  eases veth22', 'usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd-tagged.conf -lf /root/test/sr  c/test/setup/dhcpd-tagged.leases veth22.333.254']
-Got dhcp client IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 .
-server rebooting...
-DHCP server is rebooted
-
-Test test_dhcpl2relay_after_server_reboot has errors and warnings
-
-2017-10-12 12:10:49,734 | WARN  | f-event-stats-29 | FlowModBuilderVer13              | 194 - org.onosprojec  t.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:10:50,251 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.i  gmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-12 12:10:54,736 | WARN  | f-event-stats-20 | FlowModBuilderVer13              | 194 - org.onosprojec  t.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 45.683s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_after_server_reboot Success
-Done running tests
-127.0.0.1 - - [12/Oct/2017 05:11:01] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_shutting_down b/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_shutting_down
deleted file mode 100644
index 19fa66f..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_shutting_down
+++ /dev/null
@@ -1,218 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [12/Oct/2017 05:06:44] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_after_server_shutting_down (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-DHCP server running on remote host and list of service commands are
- ['usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd.conf -lf /root/test/src/test/setup/dhcpd.leases veth22', 'usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd-tagged.conf -lf /root/test/src/test/setup/dhcpd-tagged.leases veth22.333.254']
-Got dhcp client IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 .
-server rebooting...
-DHCP server is stopped
-
-Test test_dhcpl2relay_after_server_shutting_down has errors and warnings
-
-2017-10-12 12:07:29,762 | WARN  | of-event-stats-5 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:07:34,762 | WARN  | f-event-stats-14 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:07:39,734 | WARN  | f-event-stats-11 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:07:42,926 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-12 12:07:44,736 | WARN  | f-event-stats-18 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 54.023s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_after_server_shutting_down Success
-Done running tests
-127.0.0.1 - - [12/Oct/2017 05:07:51] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_stop_start b/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_stop_start
deleted file mode 100644
index 7f39a5d..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_after_server_stop_start
+++ /dev/null
@@ -1,219 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [12/Oct/2017 05:13:28] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_after_server_stop_start (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-DHCP server running on remote host and list of service commands are
- ['usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd.conf -lf /root/test/src/test/setup/dhcpd.leases veth22', 'usr/sbin/dhcpd -4 --no-pid -cf /root/test/src/test/setup/dhcpd-tagged.conf -lf /root/test/src/test/setup/dhcpd-tagged.leases veth22.333.254']
-Got dhcp client IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 .
-server rebooting...
-DHCP server is stopped
-DHCP server is started
-client got same IP after server rebooted, as expected
-
-Test test_dhcpl2relay_after_server_stop_start has errors and warnings
-
-2017-10-12 12:14:14,762 | WARN  | f-event-stats-10 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:14:19,737 | WARN  | f-event-stats-26 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:14:24,736 | WARN  | of-event-stats-4 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 12:14:29,762 | WARN  | of-event-stats-2 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 56.005s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_after_server_stop_start Success
-Done running tests
-127.0.0.1 - - [12/Oct/2017 05:14:36] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_app_activation_and_deactivation_multiple_times b/src/test/results/dhcpl2relay/test_dhcpl2relay_app_activation_and_deactivation_multiple_times
deleted file mode 100644
index 8396079..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_app_activation_and_deactivation_multiple_times
+++ /dev/null
@@ -1,250 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 00:42:55] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_app_activation_and_deactivation_multiple_times (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Dhcpl2relay app is activated and deactivated multiple times around 15 times, now sending DHCP discover
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-
-Test test_dhcpl2relay_app_activation_and_deactivation_multiple_times has errors and warnings
-
-2017-10-11 07:43:40,857 | WARN  | ice-installer-11 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:40,862 | WARN  | vice-installer-9 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:42,528 | WARN  | of-event-stats-1 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:43:43,956 | WARN  | ice-installer-18 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:43,975 | WARN  | ice-installer-16 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:46,212 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 07:43:47,179 | WARN  | ice-installer-27 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:47,183 | WARN  | ice-installer-28 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:47,533 | WARN  | of-event-stats-9 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:43:50,270 | WARN  | vice-installer-6 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:50,280 | WARN  | vice-installer-5 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:52,527 | WARN  | f-event-stats-29 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:43:53,369 | WARN  | ice-installer-14 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:53,370 | WARN  | ice-installer-17 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:56,462 | WARN  | ice-installer-25 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:56,463 | WARN  | ice-installer-29 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:57,527 | WARN  | f-event-stats-14 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:43:59,556 | WARN  | vice-installer-1 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:43:59,557 | WARN  | vice-installer-5 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:02,527 | WARN  | f-event-stats-15 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:44:02,642 | WARN  | ice-installer-19 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:02,649 | WARN  | ice-installer-20 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:05,736 | WARN  | ice-installer-23 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:05,736 | WARN  | ice-installer-26 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:07,526 | WARN  | f-event-stats-10 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:44:08,816 | WARN  | vice-installer-3 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:08,817 | WARN  | vice-installer-4 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:11,896 | WARN  | vice-installer-9 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:11,897 | WARN  | ice-installer-14 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:12,525 | WARN  | of-event-stats-9 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:44:14,975 | WARN  | ice-installer-24 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:14,976 | WARN  | ice-installer-26 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:17,528 | WARN  | f-event-stats-16 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:44:18,059 | WARN  | ice-installer-31 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:18,061 | WARN  | ice-installer-30 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:21,142 | WARN  | vice-installer-9 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:21,142 | WARN  | ice-installer-14 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:22,529 | WARN  | of-event-stats-1 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:44:24,224 | WARN  | ice-installer-24 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 07:44:24,225 | WARN  | ice-installer-16 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 81.972s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_app_activation_and_deactivation_multiple_times Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 00:44:29] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_app_install b/src/test/results/dhcpl2relay/test_dhcpl2relay_app_install
deleted file mode 100644
index 61e1419..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_app_install
+++ /dev/null
@@ -1,136 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_app_install']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_app_install (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.dhcpl2relay app is being installed
-
-Test test_dhcpl2relay_app_install has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.530s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_app_install Success
-Done running tests
-Removing test container cord-tester1
-127.0.0.1 - - [10/Oct/2017 22:53:14] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_delete_and_add_sadis_app b/src/test/results/dhcpl2relay/test_dhcpl2relay_delete_and_add_sadis_app
deleted file mode 100644
index 0d4e275..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_delete_and_add_sadis_app
+++ /dev/null
@@ -1,215 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 01:56:12] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_delete_and_add_sadis_app (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Uninstall the sadis app from onos ,app version = /root/test/src/test/dhcpl2relay/../apps/sadis-app-3.0-SNAPSHOT.oar
-Installing the sadis app in onos again, app version = /root/test/src/test/dhcpl2relay/../apps/sadis-app-3.0-SNAPSHOT.oar
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-
-Test test_dhcpl2relay_delete_and_add_sadis_app has errors and warnings
-
-2017-10-11 08:56:57,530 | WARN  | f-event-stats-27 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 08:57:02,529 | WARN  | of-event-stats-2 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 08:57:05,722 | WARN  | vice-installer-5 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-2017-10-11 08:57:05,723 | WARN  | vice-installer-9 | DistributedFlowRuleStore         | 129 - org.onosproject.onos-core-dist - 1.10.6 | No master for of:00002ec0feee2d4b : flows will be marked for removal
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 46.245s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_delete_and_add_sadis_app Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 01:57:11] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast b/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast
deleted file mode 100644
index 665a11c..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast
+++ /dev/null
@@ -1,212 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:23:25] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Dhcp server rejected client discover with invalid source mac, as expected
-
-Test test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast has errors and warnings
-
-2017-10-11 09:24:07,525 | WARN  | of-event-stats-1 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:24:12,550 | WARN  | f-event-stats-16 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:24:17,528 | WARN  | of-event-stats-6 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.370s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:24:21] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast b/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast
deleted file mode 100644
index 61f313e..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast
+++ /dev/null
@@ -1,211 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:26:27] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Dhcp server rejected client discover with invalid source mac, as expected
-
-Test test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast has errors and warnings
-
-2017-10-11 09:27:12,526 | WARN  | f-event-stats-11 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:27:17,554 | WARN  | f-event-stats-10 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.323s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:27:24] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero b/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero
deleted file mode 100644
index 2f183a6..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero
+++ /dev/null
@@ -1,212 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:29:25] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-dhcp server rejected client discover with invalid source mac, as expected
-
-Test test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero has errors and warnings
-
-2017-10-11 09:30:12,527 | WARN  | of-event-stats-2 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:30:17,554 | WARN  | f-event-stats-30 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.389s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:30:22] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_nak_packet b/src/test/results/dhcpl2relay/test_dhcpl2relay_nak_packet
deleted file mode 100644
index 984818b..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_nak_packet
+++ /dev/null
@@ -1,214 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:22:08] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_nak_packet (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 .
-server sent NAK packet when requested other IP than that server offered
-
-Test test_dhcpl2relay_nak_packet has errors and warnings
-
-2017-10-11 10:22:52,527 | WARN  | of-event-stats-9 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 10:22:57,554 | WARN  | f-event-stats-29 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 10:23:02,554 | WARN  | f-event-stats-21 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 49.931s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_nak_packet Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:23:10] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_netcfg b/src/test/results/dhcpl2relay/test_dhcpl2relay_netcfg
deleted file mode 100644
index b539bae..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_netcfg
+++ /dev/null
@@ -1,139 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ vi ../dhcpl2relay/dhcpl2relayTest.py
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_netcfg']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_netcfg (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.dhcpl2relay app is being installed
-The network configuration is shown = {u'dhcpl2relay': {u'dhcpServerConnectPoints': [u'of:000022589e4e744e/9', u'of:000022589e4e744e/1', u'of:000022589e4e744e/5', u'of:00003ee38e69ac49/12', u'of:000022589e4e744e/19', u'of:000022589e4e744e/12', u'of:00003e11096a754a/12', u'of:00003acc45bea44c/12']}}
-
-Test test_dhcpl2relay_netcfg has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.440s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_netcfg Success
-Done running tests
-Removing test container cord-tester1
-127.0.0.1 - - [10/Oct/2017 22:59:24] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_app_install b/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_app_install
deleted file mode 100644
index 233edbc..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_app_install
+++ /dev/null
@@ -1,136 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ vi ../dhcpl2relay/dhcpl2relayTest.py
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_sadis_app_install (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-
-Test test_dhcpl2relay_sadis_app_install has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.566s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_sadis_app_install Success
-Done running tests
-Removing test container cord-tester1
-127.0.0.1 - - [10/Oct/2017 22:56:20] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_netcfg b/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_netcfg
deleted file mode 100644
index 3c497ff..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_sadis_netcfg
+++ /dev/null
@@ -1,137 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_sadis_netcfg (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The network configuration is shown = {u'sadis': {u'integration': {u'cache': {u'maxsize': 50, u'enabled': False, u'ttl': u'PT0m'}}, u'entries': [{u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'uni-254', u'sTag': -1, u'hardwareIdentifier': u'00:0c:e2:22:36:00', u'ipAddress': u'10.70.47.64', u'port': u'254'}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'bbcae6507ba140a98eafdbee82bfc21d', u'sTag': -1, u'hardwareIdentifier': u'00:00:00:00:00:01', u'ipAddress': u'10.70.47.64', u'port': u'254'}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'br-int', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l1', u'sTag': -1, u'hardwareIdentifier': u'4e:11:a0:47:dd:08', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l2', u'sTag': -1, u'hardwareIdentifier': u'76:4e:f3:11:99:87', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l3', u'sTag': -1, u'hardwareIdentifier': u'92:a6:14:e6:b1:81', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': 
u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l4', u'sTag': -1, u'hardwareIdentifier': u'7e:55:9a:bf:49:c8', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l5', u'sTag': -1, u'hardwareIdentifier': u'82:fb:8e:d2:25:b8', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l6', u'sTag': -1, u'hardwareIdentifier': u'fa:54:a3:ea:da:06', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l7', u'sTag': -1, u'hardwareIdentifier': u'2a:c3:73:37:3c:df', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l8', u'sTag': -1, u'hardwareIdentifier': u'aa:d3:76:04:56:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', 
u'id': u'l9', u'sTag': -1, u'hardwareIdentifier': u'e2:07:ae:c5:ff:4f', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l10', u'sTag': -1, u'hardwareIdentifier': u'ae:84:49:8a:15:ba', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l11', u'sTag': -1, u'hardwareIdentifier': u'42:d5:8e:79:0d:28', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l12', u'sTag': -1, u'hardwareIdentifier': u'd2:a6:03:77:6d:0e', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l13', u'sTag': -1, u'hardwareIdentifier': u'a2:04:89:1e:c9:b9', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l14', u'sTag': -1, u'hardwareIdentifier': u'6a:66:b2:42:00:cf', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, 
u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l15', u'sTag': -1, u'hardwareIdentifier': u'2e:8e:00:86:91:c8', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l16', u'sTag': -1, u'hardwareIdentifier': u'9e:3d:95:c7:be:9e', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l17', u'sTag': -1, u'hardwareIdentifier': u'26:57:a3:4f:da:1f', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l18', u'sTag': -1, u'hardwareIdentifier': u'82:7a:44:91:4b:1c', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l19', u'sTag': -1, u'hardwareIdentifier': u'd2:d3:e7:c8:8b:af', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', 
u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l20', u'sTag': -1, u'hardwareIdentifier': u'ca:b3:bc:96:d4:c5', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l21', u'sTag': -1, u'hardwareIdentifier': u'52:d4:bb:e1:e0:a2', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'l22', u'sTag': -1, u'hardwareIdentifier': u'9a:d7:19:18:5b:8e', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'r23', u'sTag': -1, u'hardwareIdentifier': u'ee:7c:74:19:c9:45', u'ipAddress': u'10.70.47.64', u'port': 1}, {u'slot': 1, u'cTag': -1, u'nasPortId': u'1/1/2', u'nasId': u'1/1/2', u'id': u'None', u'sTag': -1, u'hardwareIdentifier': u'3e:e3:8e:69:ac:49', u'ipAddress': u'10.70.47.64', u'port': 1}]}}
-
-Test test_dhcpl2relay_sadis_netcfg has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.687s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_sadis_netcfg Success
-Done running tests
-Removing test container cord-tester1
-127.0.0.1 - - [10/Oct/2017 23:05:27] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet b/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet
deleted file mode 100644
index 5d174d8..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet
+++ /dev/null
@@ -1,211 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [12/Oct/2017 04:45:54] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Sending DHCP discover with lease time of 700
-client requested lease time in discover packer is not seen in server ACK packet as expected
-
-Test test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 35.839s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_discover_but_not_in_request_packet Success
-Done running tests
-127.0.0.1 - - [12/Oct/2017 04:46:43] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet b/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet
deleted file mode 100644
index 6f7b154..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet
+++ /dev/null
@@ -1,211 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [12/Oct/2017 04:52:15] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-client requested lease time in request packet seen in servre replied ACK packet as expected
-
-Test test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 35.867s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_specific_lease_time_only_in_request_but_not_in_discover_packet Success
-Done running tests
-127.0.0.1 - - [12/Oct/2017 04:53:03] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_N_requests b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_N_requests
deleted file mode 100644
index f182988..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_N_requests
+++ /dev/null
@@ -1 +0,0 @@
-### Right now we can't test this test cases
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server
deleted file mode 100644
index dab0366..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server
+++ /dev/null
@@ -1,97 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Existing connect point of dhcp server is [u'of:000022589e4e744e/12', u'of:000022589e4e744e/1', u'of:000022589e4e744e/9', u'of:000022589e4e744e/6', u'of:000022589e4e744e/7']
-Added array of connect points of dhcp server is [u'of:000022589e4e744e/12', 'of:000022589e4e744e/1', 'of:000022589e4e744e/5', 'of:000022589e4e744e/9', 'of:000022589e4e744e/19']
-org.opencord.dhcpl2relay app is being installed
-The network configuration is shown {u'dhcpl2relay': {u'dhcpServerConnectPoints': [u'of:000022589e4e744e/12', u'of:000022589e4e744e/1', u'of:000022589e4e744e/5', u'of:000022589e4e744e/9', u'of:000022589e4e744e/19']}}
-The loaded onos network configuration is = [u'of:000022589e4e744e/12', 'of:000022589e4e744e/1', 'of:000022589e4e744e/5', 'of:000022589e4e744e/9', 'of:000022589e4e744e/19']
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-
-Test test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server has errors and warnings
-
-2017-10-10 12:42:28,932 | WARN  | of-event-stats-9 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-10 12:42:33,931 | WARN  | f-event-stats-27 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-10 12:42:34,750 | WARN  |  I/O worker #341 | PacketManager                    | 127 - org.onosproject.onos-core-net - 1.10.6 | Packet processor org.opencord.dhcpl2relay.DhcpL2Relay$DhcpRelayPacketProcessor@5c548042 threw an exception
-java.lang.NullPointerException
-2017-10-10 12:42:35,864 | WARN  |  I/O worker #341 | PacketManager                    | 127 - org.onosproject.onos-core-net - 1.10.6 | Packet processor org.opencord.dhcpl2relay.DhcpL2Relay$DhcpRelayPacketProcessor@5c548042 threw an exception
-java.lang.NullPointerException
-Exception:
-        at org.opencord.dhcpl2relay.DhcpL2Relay.nasPortId(DhcpL2Relay.java:338)[208:org.opencord.dhcpl2relay:1.2.0.SNAPSHOT]
-        at org.opencord.dhcpl2relay.DhcpL2Relay.access$800(DhcpL2Relay.java:92)[208:org.opencord.dhcpl2relay:1.2.0.SNAPSHOT]
-        at org.opencord.dhcpl2relay.DhcpL2Relay$DhcpRelayPacketProcessor.processDhcpPacketFromClient(DhcpL2Relay.java:520)[208:org.opencord.dhcpl2relay:1.2.0.SNAPSHOT]
-        at org.opencord.dhcpl2relay.DhcpL2Relay$DhcpRelayPacketProcessor.processDhcpPacket(DhcpL2Relay.java:479)[208:org.opencord.dhcpl2relay:1.2.0.SNAPSHOT]
-        at org.opencord.dhcpl2relay.DhcpL2Relay$DhcpRelayPacketProcessor.process(DhcpL2Relay.java:413)[208:org.opencord.dhcpl2relay:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:389)[127:org.onosproject.onos-core-net:1.10.6]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[193:org.onosproject.onos-providers-openflow-packet:1.10.6]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:335)[191:org.onosproject.onos-protocols-openflow-ctl:1.10.6]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:746)[191:org.onosproject.onos-protocols-openflow-ctl:1.10.6]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:267)[189:org.onosproject.onos-protocols-openflow-api:1.10.6]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 32.291s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server Success
-Done running tests
-127.0.0.1 - - [10/Oct/2017 05:42:36] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_renew_time b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_renew_time
deleted file mode 100644
index e69de29..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_renew_time
+++ /dev/null
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_request_after_reboot b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_request_after_reboot
deleted file mode 100644
index c07f344..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_request_after_reboot
+++ /dev/null
@@ -1,214 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown       Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 22:41:49] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_client_request_after_reboot (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 .
-client rebooting...
-client got same IP after reboot, as expected
-
-Test test_dhcpl2relay_with_client_request_after_reboot has errors and warnings
-
-2017-10-12 05:42:32,530 | WARN  | of-event-stats-5 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-12 05:42:37,554 | WARN  | f-event-stats-21 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 58.340s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_client_request_after_reboot Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 22:43:00] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message
deleted file mode 100644
index 2a52163..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message
+++ /dev/null
@@ -1,212 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:25:44] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-dhcp server offered IP address with client requested lease time
-
-Test test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message has errors and warnings
-
-2017-10-11 10:26:27,554 | WARN  | f-event-stats-14 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 39.880s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_client_requests_with_specific_lease_time_in_discover_message Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:26:36] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address
deleted file mode 100644
index 2af80f9..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address
+++ /dev/null
@@ -1,211 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:15:23] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_clients_desired_address (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client desired IP 192.168.1.31 from server 192.168.1.2 for mac 02:03:04:00:00:01 as expected
-
-Test test_dhcpl2relay_with_clients_desired_address has errors and warnings
-
-2017-10-11 10:16:07,546 | WARN  | f-event-stats-18 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 39.707s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:16:15] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address_in_out_of_pool b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address_in_out_of_pool
deleted file mode 100644
index 48c526d..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_clients_desired_address_in_out_of_pool
+++ /dev/null
@@ -1,209 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:19:42] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_clients_desired_address_in_out_of_pool (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-server offered IP from its pool when requested out of pool IP, as expected
-
-Test test_dhcpl2relay_with_clients_desired_address_in_out_of_pool has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 39.797s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_clients_desired_address_in_out_of_pool Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:20:34] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_ctag_options b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_ctag_options
deleted file mode 100644
index 3924624..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_ctag_options
+++ /dev/null
@@ -1,215 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:03:05] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_different_ctag_options (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The C Tag info from network configuration is = 600
-
-Test test_dhcpl2relay_with_different_ctag_options has errors and warnings
-
-2017-10-11 09:03:52,529 | WARN  | of-event-stats-7 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:03:52,935 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 09:03:57,527 | WARN  | of-event-stats-5 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:04:02,554 | WARN  | of-event-stats-4 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.333s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_different_ctag_options Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:04:10] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_stag_options b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_stag_options
deleted file mode 100644
index 80cc255..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_stag_options
+++ /dev/null
@@ -1,215 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:07:15] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_different_stag_options (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The S Tag info from the network configuration is = 600
-
-Test test_dhcpl2relay_with_different_stag_options has errors and warnings
-
-2017-10-11 09:07:57,529 | WARN  | f-event-stats-17 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:08:02,528 | WARN  | f-event-stats-12 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:08:07,527 | WARN  | f-event-stats-24 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:08:12,527 | WARN  | of-event-stats-8 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.319s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_different_stag_options Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:08:19] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_uni_port_entry_sadis_config b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_uni_port_entry_sadis_config
deleted file mode 100644
index 7d1d3c2..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_different_uni_port_entry_sadis_config
+++ /dev/null
@@ -1,217 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 01:59:19] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_different_uni_port_entry_sadis_config (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The network configuration is shown = uni-200
-
-Test test_dhcpl2relay_with_different_uni_port_entry_sadis_config has errors and warnings
-
-2017-10-11 09:00:02,529 | WARN  | of-event-stats-1 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:00:07,527 | WARN  | of-event-stats-5 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:00:10,253 | ERROR |  I/O worker #340 | SadisManager                     | 243 - org.opencord.sadis-app - 1.2.0.SNAPSHOT | Data not found for id uni-254
-2017-10-11 09:00:12,527 | WARN  | f-event-stats-23 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:00:17,121 | ERROR |  I/O worker #340 | SadisManager                     | 243 - org.opencord.sadis-app - 1.2.0.SNAPSHOT | Data not found for id uni-254
-2017-10-11 09:00:17,121 | ERROR |  I/O worker #340 | SadisManager                     | 243 - org.opencord.sadis-app - 1.2.0.SNAPSHOT | Data not found for id uni-254
-2017-10-11 09:00:17,528 | WARN  | f-event-stats-15 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.293s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_different_uni_port_entry_sadis_config Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:00:24] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_nasportid_different_from_id b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_nasportid_different_from_id
deleted file mode 100644
index b0fa7ee..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_nasportid_different_from_id
+++ /dev/null
@@ -1,213 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:13:11] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_nasportid_different_from_id (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The nasPortId info from network configuration is shown = uni-509
-
-Test test_dhcpl2relay_with_nasportid_different_from_id has errors and warnings
-
-2017-10-11 09:13:57,527 | WARN  | f-event-stats-28 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:14:02,526 | WARN  | f-event-stats-18 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:14:07,554 | WARN  | f-event-stats-27 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.391s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_nasportid_different_from_id Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:14:16] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_release b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_release
deleted file mode 100644
index 838947c..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_release
+++ /dev/null
@@ -1,214 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:45:02] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_one_release (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-Releasing ip 192.168.1.11 to server 192.168.1.2
-Triggering DHCP discover again after release
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-Verifying released IP was given back on rediscover
-Test done. Releasing ip 192.168.1.11 to server 192.168.1.2
-
-Test test_dhcpl2relay_with_one_release has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 36.028s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_one_release Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:45:50] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_request b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_request
deleted file mode 100644
index 5a39e34..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_one_request
+++ /dev/null
@@ -1,137 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit_olt --olt-arg=00:0c:e2:31:05:00 --disable-teardown --disable-cleanup --test-mode
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_one_request (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-
-Test test_dhcpl2relay_with_one_request has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 34.717s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_one_request Success
-Done running tests
-Removing test container cord-tester1
-127.0.0.1 - - [10/Oct/2017 22:47:15] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
-
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_discovers b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_discovers
deleted file mode 100644
index f2f8475..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_discovers
+++ /dev/null
@@ -1,212 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:01:34] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_same_client_and_multiple_discovers (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01 . Not going to send DHCPREQUEST.
-Triggering DHCP discover again.
-got same ip to smae the client when sent discover again, as expected
-
-Test test_dhcpl2relay_with_same_client_and_multiple_discovers has errors and warnings
-
-2017-10-11 10:02:17,530 | WARN  | f-event-stats-12 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 39.727s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_discovers Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:02:26] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_requests b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_requests
deleted file mode 100644
index 4e4312e..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_same_client_and_multiple_requests
+++ /dev/null
@@ -1,214 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 03:04:53] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_same_client_and_multiple_requests (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-Sending DHCP discover and DHCP request.
-Got dhcp client IP 192.168.1.11 from server 192.168.1.2 for mac 02:03:04:00:00:01
-Sending DHCP request again.
-got same ip to smae the client when sent request again, as expected
-
-Test test_dhcpl2relay_with_same_client_and_multiple_requests has errors and warnings
-
-2017-10-11 10:05:37,529 | WARN  | f-event-stats-23 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 40.036s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_same_client_and_multiple_requests Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 03:05:46] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis b/src/test/results/dhcpl2relay/test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis
deleted file mode 100644
index e7a17e9..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis
+++ /dev/null
@@ -1,214 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ vi ../dhcpl2relay/dhcpl2relayTest.py
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 00:40:11] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The S Tag and C Tag info from network configuration are 500 and 600 respectively
-
-Test test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis has errors and warnings
-
-2017-10-11 07:40:57,527 | WARN  | of-event-stats-8 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:41:02,529 | WARN  | f-event-stats-30 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:41:07,528 | WARN  | f-event-stats-13 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.110s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 00:41:15] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_nasportid_option_in_sadis b/src/test/results/dhcpl2relay/test_dhcpl2relay_without_nasportid_option_in_sadis
deleted file mode 100644
index 5eb13c3..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_nasportid_option_in_sadis
+++ /dev/null
@@ -1,217 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:10:01] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_without_nasportid_option_in_sadis (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The nasPortId info from network configuration is shown =
-
-Test test_dhcpl2relay_without_nasportid_option_in_sadis has errors and warnings
-
-2017-10-11 09:10:47,527 | WARN  | f-event-stats-25 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:10:52,554 | WARN  | of-event-stats-8 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:10:53,792 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 09:10:57,526 | WARN  | of-event-stats-7 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:11:02,528 | WARN  | f-event-stats-19 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.452s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_without_nasportid_option_in_sadis Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:11:06] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_sadis_app b/src/test/results/dhcpl2relay/test_dhcpl2relay_without_sadis_app
deleted file mode 100644
index 0e62ac3..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_sadis_app
+++ /dev/null
@@ -1,211 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 00:49:01] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_without_sadis_app (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-
-Test test_dhcpl2relay_without_sadis_app has errors and warnings
-
-2017-10-11 07:49:47,528 | WARN  | of-event-stats-7 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 07:49:47,971 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 07:49:52,527 | WARN  | f-event-stats-29 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.221s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_without_sadis_app Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 00:49:58] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_serial_id_of_olt b/src/test/results/dhcpl2relay/test_dhcpl2relay_without_serial_id_of_olt
deleted file mode 100644
index a43af29..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_without_serial_id_of_olt
+++ /dev/null
@@ -1,220 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:16:07] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_without_serial_id_of_olt (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The serial Id info from network configuration is shown =
-
-Test test_dhcpl2relay_without_serial_id_of_olt has errors and warnings
-
-2017-10-11 09:16:51,460 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 09:16:52,527 | WARN  | f-event-stats-17 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:16:57,530 | WARN  | f-event-stats-12 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:16:58,463 | ERROR |  I/O worker #340 | SadisManager                     | 243 - org.opencord.sadis-app - 1.2.0.SNAPSHOT | Data not found for id d9f17c736cb04df3b583ea54ef58e9ad
-2017-10-11 09:16:58,463 | WARN  |  I/O worker #340 | DhcpL2Relay                      | 244 - org.opencord.dhcpl2relay - 1.2.0.SNAPSHOT | Device not found for of:0001000000000001/254
-2017-10-11 09:16:58,463 | WARN  |  I/O worker #340 | DhcpL2Relay                      | 244 - org.opencord.dhcpl2relay - 1.2.0.SNAPSHOT | RelayAgent MAC not found
-2017-10-11 09:17:02,530 | WARN  | f-event-stats-16 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:17:07,525 | WARN  | f-event-stats-27 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.344s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_without_serial_id_of_olt Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:17:12] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/dhcpl2relay/test_dhcpl2relay_wrong_serial_id_of_olt b/src/test/results/dhcpl2relay/test_dhcpl2relay_wrong_serial_id_of_olt
deleted file mode 100644
index c1cd49b..0000000
--- a/src/test/results/dhcpl2relay/test_dhcpl2relay_wrong_serial_id_of_olt
+++ /dev/null
@@ -1,217 +0,0 @@
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-ubuntu@olt-tester:~/cord-tester/src/test/setup$ ./voltha-test.py --test-type=dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt --manifest=manifest-olt-voltha-local.json --voltha-host=10.70.47.64 --olt-type=tibit-olt --olt-arg=00:0c:e2:31:05:00 --disable-cleanup --disable-teardown
-Starting CordTester Web Server
- * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
- * Restarting with stat
- * Debugger is active!
- * Debugger PIN: 718-316-938
-WARNING: No route found for IPv6 destination :: (no default route?)
-Cleaning up existing cluster volumes
-Onos IP 10.70.47.64
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth0 -l l1 cord-tester1 192.168.100.1/24 02:03:04:00:00:01
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth2 -l l2 cord-tester1 192.168.100.2/24 02:03:04:00:00:02
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth4 -l l3 cord-tester1 192.168.100.3/24 02:03:04:00:00:03
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth6 -l l4 cord-tester1 192.168.100.4/24 02:03:04:00:00:04
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth8 -l l5 cord-tester1 192.168.100.5/24 02:03:04:00:00:05
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth10 -l l6 cord-tester1 192.168.100.6/24 02:03:04:00:00:06
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth12 -l l7 cord-tester1 192.168.100.7/24 02:03:04:00:00:07
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth14 -l l8 cord-tester1 192.168.100.8/24 02:03:04:00:00:08
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth16 -l l9 cord-tester1 192.168.100.9/24 02:03:04:00:00:09
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth18 -l l10 cord-tester1 192.168.100.10/24 02:03:04:00:00:0a
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth20 -l l11 cord-tester1 192.168.100.11/24 02:03:04:00:00:0b
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth22 -l l12 cord-tester1 192.168.100.12/24 02:03:04:00:00:0c
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth24 -l l13 cord-tester1 192.168.100.13/24 02:03:04:00:00:0d
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth26 -l l14 cord-tester1 192.168.100.14/24 02:03:04:00:00:0e
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth28 -l l15 cord-tester1 192.168.100.15/24 02:03:04:00:00:0f
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth30 -l l16 cord-tester1 192.168.100.16/24 02:03:04:00:00:10
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth32 -l l17 cord-tester1 192.168.100.17/24 02:03:04:00:00:11
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth34 -l l18 cord-tester1 192.168.100.18/24 02:03:04:00:00:12
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth36 -l l19 cord-tester1 192.168.100.19/24 02:03:04:00:00:13
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth38 -l l20 cord-tester1 192.168.100.20/24 02:03:04:00:00:14
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth40 -l l21 cord-tester1 192.168.100.21/24 02:03:04:00:00:15
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth42 -l l22 cord-tester1 192.168.100.22/24 02:03:04:00:00:16
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth44 -l l23 cord-tester1 192.168.100.23/24 02:03:04:00:00:17
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth46 -l l24 cord-tester1 192.168.100.24/24 02:03:04:00:00:18
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth48 -l l25 cord-tester1 192.168.100.25/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth50 -l l26 cord-tester1 192.168.100.26/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth52 -l l27 cord-tester1 192.168.100.27/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth54 -l l28 cord-tester1 192.168.100.28/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth56 -l l29 cord-tester1 192.168.100.29/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth58 -l l30 cord-tester1 192.168.100.30/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth60 -l l31 cord-tester1 192.168.100.31/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth62 -l l32 cord-tester1 192.168.100.32/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth64 -l l33 cord-tester1 192.168.100.33/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth66 -l l34 cord-tester1 192.168.100.34/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth68 -l l35 cord-tester1 192.168.100.35/24
-RTNETLINK answers: File exists
-Running PIPEWORK cmd: pipework enp1s0f0 -i veth70 -l l36 cord-tester1 192.168.100.36/24
-RTNETLINK answers: File exists
-Provisioning guest port vcpe0 for cord-tester1 with host port: enp1s0f0, s_tag: 222, c_tag: 111
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe1 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 304
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe2 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 305
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe3 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 306
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe4 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 307
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe5 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 308
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe6 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 309
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe7 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 310
-
-RTNETLINK answers: File exists
-Provisioning guest port vcpe8 for cord-tester1 with host port: enp1s0f0, s_tag: 304, c_tag: 311
-
-RTNETLINK answers: File exists
-Test container cord-tester1 started and provisioned to run tests using nosetests
-127.0.0.1 - - [11/Oct/2017 02:19:05] "POST /start HTTP/1.1" 200 -
-Running test case dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt
-WARNING: No route found for IPv6 destination :: (no default route?)
-Controller IP [u'10.70.47.64'], Test type dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt
-Installing ONOS cord apps
-# Host [10.70.47.64]:8101 found: line 5
-/root/.ssh/known_hosts updated.
-Original contents retained as /root/.ssh/known_hosts.old
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app sadis-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app olt-app, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 3.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 3.0-SNAPSHOT installed
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-Radius server running with IP 11.0.0.3
-Running ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt']
-Running tests: ['dhcpl2relay:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt']
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Voltha device of:0001000000000001 not in map. Using uplink vlan 333
-Installing OLT app /root/test/src/test/utils/../apps/olt-app-3.0-SNAPSHOT.oar
-Connecting to controller at 10.70.47.64
-onoscli: Trying to connect to 10.70.47.64
-# Host [10.70.47.64]:8101 found: line 1 type RSA
-Spawning pexpect for ip 10.70.47.64
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 10.70.47.64 closed.
-
-Running command: ip link add link veth48 name veth48.333 type vlan id 333
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333 up
-Running command: ip link add link veth48.333 name veth48.333.254 type vlan id 254
-RTNETLINK answers: File exists
-Running command: ip link set veth48.333.254 up
-Configuring OVS flow for port 37, s_tag 333
-test_dhcpl2relay_with_wrong_serial_id_of_olt (dhcpl2relay.dhcpl2relayTest.dhcpl2relay_exchange) ... # Host [10.70.47.64]:8101 found: line 1 type RSA
-org.opencord.sadis app is being installed
-The serial Id info from network configuration is shown = 07f20d06696041febf974ccdhdhhjh37
-
-Test test_dhcpl2relay_with_wrong_serial_id_of_olt has errors and warnings
-
-2017-10-11 09:19:52,103 | WARN  |  I/O worker #340 | IgmpSnoop                        | 203 - org.opencord.igmp - 1.3.0.SNAPSHOT | No SSM translate source found for group 224.0.0.251
-2017-10-11 09:19:52,527 | WARN  | f-event-stats-20 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:19:56,570 | ERROR |  I/O worker #340 | SadisManager                     | 243 - org.opencord.sadis-app - 1.2.0.SNAPSHOT | Data not found for id d9f17c736cb04df3b583ea54ef58e9ad
-2017-10-11 09:19:56,570 | WARN  |  I/O worker #340 | DhcpL2Relay                      | 244 - org.opencord.dhcpl2relay - 1.2.0.SNAPSHOT | Device not found for of:0001000000000001/254
-2017-10-11 09:19:56,570 | WARN  |  I/O worker #340 | DhcpL2Relay                      | 244 - org.opencord.dhcpl2relay - 1.2.0.SNAPSHOT | RelayAgent MAC not found
-2017-10-11 09:19:57,526 | WARN  | of-event-stats-9 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-2017-10-11 09:20:02,554 | WARN  | f-event-stats-26 | FlowModBuilderVer13              | 194 - org.onosproject.onos-providers-openflow-flow - 1.10.6 | Match type INNER_VLAN_VID not yet implemented.
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 52.397s
-
-OK
-Test dhcpl2relayTest.py:dhcpl2relay_exchange.test_dhcpl2relay_with_wrong_serial_id_of_olt Success
-Done running tests
-127.0.0.1 - - [11/Oct/2017 02:20:10] "POST /test HTTP/1.1" 200 -
-ubuntu@olt-tester:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/onboarding/onboarding-tests-output.log b/src/test/results/onboarding/onboarding-tests-output.log
deleted file mode 100644
index 6fb5881..0000000
--- a/src/test/results/onboarding/onboarding-tests-output.log
+++ /dev/null
@@ -1,573 +0,0 @@
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_health
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... Pinging ONBOARDED SERVICE mysite_vsg-3 at IP 172.27.0.4
-ONBOARDED SERVICE mysite_vsg-3 at IP 172.27.0.4 is reachable
-Pinging ONBOARDED SERVICE mysite_exampleservice-2 at IP 172.27.0.3
-ONBOARDED SERVICE mysite_exampleservice-2 at IP 172.27.0.3 is reachable
-Pinging ONBOARDED SERVICE mysite_vsg-1 at IP 172.27.0.2
-ONBOARDED SERVICE mysite_vsg-1 at IP 172.27.0.2 is reachable
-
-Test test_exampleservice_health has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 10.722s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_login
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... Checking if ONBOARDING SERVICE at 172.27.0.4 is accessible from compute node sorrowful-cat.cord.lab
-OK
-Checking if ONBOARDING SERVICE at 172.27.0.3 is accessible from compute node sorrowful-cat.cord.lab
-OK
-Checking if ONBOARDING SERVICE at 172.27.0.2 is accessible from compute node sorrowful-cat.cord.lab
-OK
-
-Test test_exampleservice_for_login has errors and warnings
-
-2017-06-13 07:35:09,846 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 14.237s
-
-OK
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_default_route_through_testclient
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ...
-Test test_exampleservice_for_default_route_through_testclient has errors and warnings
-
-2017-06-13 07:36:40,911 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 18.468s
-
-OK
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_service_access_through_testclientUnable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ...
-Test test_exampleservice_for_service_access_through_testclient has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 11.682s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_service_reachability_from_cord_tester
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... mv: cannot move '/etc/resolv.conf.dhclient-new.18668' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-
-Test test_exampleservice_for_service_reachability_from_cord_tester has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 12.372s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_operational_status_from_testclient
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ...
-Test test_exampleservice_operational_status_from_testclient has errors and warnings
-
-2017-06-13 09:36:34,324 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 12.733s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_operational_access_from_cord_tester
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.20722' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-
-Test test_exampleservice_operational_access_from_cord_tester has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 10.716s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_service_message
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... mv: cannot move '/etc/resolv.conf.dhclient-new.20804' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-
-Test test_exampleservice_for_service_message has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 10.925s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_tenant_message
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... mv: cannot move '/etc/resolv.conf.dhclient-new.20886' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-
-Test test_exampleservice_for_tenant_message has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 11.205s
-
-OK
-
-*************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_subscriber_interface_toggle
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are False and 409
-Algo: ... mv: cannot move '/etc/resolv.conf.dhclient-new.21179' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.21204' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-
-Test test_exampleservice_access_after_subscriber_interface_toggle has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 14.538s
-
-OK
-***************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_service_paused
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... mv: cannot move '/etc/resolv.conf.dhclient-new.21287' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Pausing example service running vm
-
-Test test_exampleservice_access_after_service_paused has errors and warnings
-
-2017-06-13 10:07:30,728 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.900s
-
-OK
-
-*************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_service_restart
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.22044' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Restarting example service running vm
-RTNETLINK answers: No such process
-
-Test test_exampleservice_access_after_service_restart has errors and warnings
-
-2017-06-13 10:26:20,898 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 10:26:21,341 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 19]
-2017-06-13 10:26:21,941 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 20]
-2017-06-13 10:26:22,538 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 21]
-2017-06-13 10:26:37,144 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 48.066s
-
-OK
-***************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_service_stop
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.23650' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Stopping example service running vm
-RTNETLINK answers: No such process
-
-Test test_exampleservice_access_after_service_stop has errors and warnings
-
-2017-06-13 11:15:34,207 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 34]
-2017-06-13 11:15:34,853 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 35]
-2017-06-13 11:15:35,566 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 36]
-2017-06-13 11:16:12,285 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 11:16:15,537 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 11:16:20,552 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 11:16:27,806 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 79.389s
-
-OK
-root@495131b1afec:~/test/src/test#
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_service_message_after_service_stop_and_start
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are False and 409
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.24066' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Stopping example service running VM
-RTNETLINK answers: No such process
-
-Test test_exampleservice_for_service_message_after_service_stop_and_start has errors and warnings
-
-2017-06-13 11:22:22,025 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 40]
-2017-06-13 11:22:22,731 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 41]
-2017-06-13 11:22:23,416 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 42]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 49.291s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_for_tenant_message_after_service_restart
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.24656' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Restarting example service running VM
-RTNETLINK answers: No such process
-
-Test test_exampleservice_for_tenant_message_after_service_restart has errors and warnings
-
-2017-06-13 11:32:22,858 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 46]
-2017-06-13 11:32:23,457 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 47]
-2017-06-13 11:32:24,056 | WARN  | source-registrar | ResourceDeviceListener           | 131 - org.onosproject.onos-core-net - 1.8.7 | Failed to register Bandwidth for [of:0000525400009f10, 48]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.383s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_vcpe_instance_restart
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.26158' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.26185' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-
-Test test_exampleservice_access_after_vcpe_instance_restart has errors and warnings
-
-2017-06-13 11:52:02,204 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 26.920s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_vcpe_instance_wan_interface_toggle
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.2825' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.2876' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.2907' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-
-Test test_exampleservice_access_after_vcpe_instance_wan_interface_toggle has errors and warnings
-
-2017-06-13 13:05:22,040 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:05:34,646 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.processDhcp(CordVtnDhcpProxy.java:220)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor.process(CordVtnDhcpProxy.java:198)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.onosproject.net.packet.impl.PacketManager$InternalPacketProviderService.processPacket(PacketManager.java:373)[131:org.onosproject.onos-core-net:1.8.7]
-        at org.onosproject.provider.of.packet.impl.OpenFlowPacketProvider$InternalPacketProvider.handlePacket(OpenFlowPacketProvider.java:171)[176:org.onosproject.onos-providers-openflow-packet:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl.processPacket(OpenFlowControllerImpl.java:316)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.impl.OpenFlowControllerImpl$OpenFlowSwitchAgent.processMessage(OpenFlowControllerImpl.java:727)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-        at org.onosproject.openflow.controller.driver.AbstractOpenFlowSwitch.handleMessage(AbstractOpenFlowSwitch.java:266)[161:org.onosproject.onos-protocols-openflow-api:1.8.7]
-        at org.onosproject.openflow.controller.impl.OFChannelHandler.dispatchMessage(OFChannelHandler.java:1254)[174:org.onosproject.onos-protocols-openflow-ctl:1.8.7]
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 40.789s
-
-OK
-
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_access_after_firewall_rule_added_to_drop_service_running_server_in_vcpe_instance
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.3568' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-
-Test test_exampleservice_access_after_firewall_rule_added_to_drop_service_running_server_in_vcpe_instance has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.103s
-
-OK
-
-*************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_xos_subcriber_access_exampleservice
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... index and its type are 0, <type 'int'>
-get url... http://xos:9000/api/tenant/cord/subscriber/
-Test passed: 200: [{"humanReadableName":"cordSubscriber-1","id":1,"features":{"cdn":false,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"123","name":"My House"}},{"humanReadableName":"cordSubscriber-3","id":3,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"200","name":"My House 0"}},{"humanReadableName":"cordSubscriber-4","id":4,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"201","name":"My House 1"}},{"humanReadableName":"cordSubscriber-5","id":5,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"202","name":"My House 2"}},{"humanReadableName":"cordSubscriber-6","id":6,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"203","name":"My House 3"}},{"humanReadableName":"cordSubscriber-7","id":7,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"204","name":"My House 4"}}]
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.3879' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-Deleting tenant with s_tag: 304, c_tag: 304
-get url... http://xos:9000/api/tenant/cord/volt/
-Test passed: 200: [{"humanReadableName":"VOLTTenantForAPI-7","id":7,"service_specific_id":"123","s_tag":"222","c_tag":"111","subscriber":1,"related":{"instance_id":1,"instance_name":"mysite_vsg","vsg_id":8,"wan_container_ip":"10.6.1.131","compute_node_name":"sorrowful-cat"}},{"humanReadableName":"VOLTTenantForAPI-16","id":16,"service_specific_id":null,"s_tag":"304","c_tag":"304","subscriber":3,"related":{}},{"humanReadableName":"VOLTTenantForAPI-19","id":19,"service_specific_id":null,"s_tag":"304","c_tag":"305","subscriber":4,"related":{"instance_id":3,"instance_name":"mysite_vsg","vsg_id":20,"wan_container_ip":"10.6.1.135","compute_node_name":"sorrowful-cat"}},{"humanReadableName":"VOLTTenantForAPI-22","id":22,"service_specific_id":null,"s_tag":"304","c_tag":"306","subscriber":5,"related":{"instance_id":3,"instance_name":"mysite_vsg","vsg_id":23,"wan_container_ip":"10.6.1.136","compute_node_name":"sorrowful-cat"}},{"humanReadableName":"VOLTTenantForAPI-25","id":25,"service_specific_id":null,"s_tag":"304","c_tag":"307","subscriber":6,"related":{"instance_id":3,"instance_name":"mysite_vsg","vsg_id":26,"wan_container_ip":"10.6.1.137","compute_node_name":"sorrowful-cat"}},{"humanReadableName":"VOLTTenantForAPI-28","id":28,"service_specific_id":null,"s_tag":"304","c_tag":"308","subscriber":7,"related":{"instance_id":3,"instance_name":"mysite_vsg","vsg_id":29,"wan_container_ip":"10.6.1.138","compute_node_name":"sorrowful-cat"}}]
-Deleting subscriber ID 3 for account num 200
-url http://xos:9000/api/tenant/cord/subscriber/3
-Test passed: 204:
-Deleting VOLT Tenant ID 16 for subscriber 3
-url http://xos:9000/api/tenant/cord/volt/16
-Test failed: 404: {"detail":"Not found."}
-
-Test test_exampleservice_xos_subcriber_access_exampleservice has no errors and warnings in the logs
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 11.053s
-
-OK
-**************************************************
-root@495131b1afec:~/test/src/test# nosetests -v -s onboarding/onboardingTest.py:onboarding_exchange.test_exampleservice_multiple_subcribers_access_same_service
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Algo: ... index and its type are 0, <type 'int'>
-get url... http://xos:9000/api/tenant/cord/subscriber/
-Test passed: 200: [{"humanReadableName":"cordSubscriber-1","id":1,"features":{"cdn":false,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"123","name":"My House"}},{"humanReadableName":"cordSubscriber-4","id":4,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"201","name":"My House 1"}},{"humanReadableName":"cordSubscriber-5","id":5,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"202","name":"My House 2"}},{"humanReadableName":"cordSubscriber-6","id":6,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"203","name":"My House 3"}},{"humanReadableName":"cordSubscriber-7","id":7,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"204","name":"My House 4"}}]
-Creating tenant with s_tag: 304, c_tag: 304
-url, data.. http://xos:9000/api/tenant/cord/subscriber/ {"features": {"status": "enabled", "cdn": true, "uplink_speed": 1000000000, "downlink_speed": 1000000000, "uverse": true}, "identity": {"account_num": "200", "name": "My House 0"}}
-requests.codes..... 201
-Test passed: 201: {"humanReadableName":"cordSubscriber-8","id":8,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"200","name":"My House 0"}}
-get url... http://xos:9000/api/tenant/cord/subscriber/
-Test passed: 200: [{"humanReadableName":"cordSubscriber-1","id":1,"features":{"cdn":false,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"123","name":"My House"}},{"humanReadableName":"cordSubscriber-4","id":4,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"201","name":"My House 1"}},{"humanReadableName":"cordSubscriber-5","id":5,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"202","name":"My House 2"}},{"humanReadableName":"cordSubscriber-6","id":6,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"203","name":"My House 3"}},{"humanReadableName":"cordSubscriber-7","id":7,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"204","name":"My House 4"}},{"humanReadableName":"cordSubscriber-8","id":8,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"200","name":"My House 0"}}]
-Subscriber ID for account num 200 = 8
-url, data.. http://xos:9000/api/tenant/cord/volt/ {"subscriber": "8", "c_tag": "304", "s_tag": "304"}
-requests.codes..... 201
-Test passed: 201: {"humanReadableName":"VOLTTenantForAPI-31","id":31,"service_specific_id":null,"s_tag":"304","c_tag":"304","subscriber":8,"related":{}}
-Delaying 350 seconds for the VCPE to be provisioned
-Testing for external connectivity to VCPE vcpe-304-304
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.5742' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.18 for vcpe1.304.304
-Sending icmp echo requests to external network 8.8.8.8
-index and its type are 1, <type 'int'>
-get url... http://xos:9000/api/tenant/cord/subscriber/
-Test passed: 200: [{"humanReadableName":"cordSubscriber-1","id":1,"features":{"cdn":false,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"123","name":"My House"}},{"humanReadableName":"cordSubscriber-4","id":4,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"201","name":"My House 1"}},{"humanReadableName":"cordSubscriber-5","id":5,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"202","name":"My House 2"}},{"humanReadableName":"cordSubscriber-6","id":6,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"203","name":"My House 3"}},{"humanReadableName":"cordSubscriber-7","id":7,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"204","name":"My House 4"}},{"humanReadableName":"cordSubscriber-8","id":8,"features":{"cdn":true,"uplink_speed":1000000000,"downlink_speed":1000000000,"uverse":true,"status":"enabled"},"identity":{"account_num":"200","name":"My House 0"}}]
-mv: cannot move '/etc/resolv.conf.dhclient-new.5782' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: No such process
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.5809' to '/etc/resolv.conf': Device or resource busy
-route is 10.6.1.194
-RTNETLINK answers: File exists
-RTNETLINK answers: No such process
-
-Test test_exampleservice_multiple_subcribers_access_same_service has errors and warnings
-
-2017-06-13 13:14:38,541 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:14:41,044 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:14:48,484 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:15:01,721 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:15:08,856 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:15:23,898 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:15:33,543 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:15:47,928 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:16:03,840 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:16:12,901 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:16:23,450 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:16:35,270 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:16:56,240 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:17:13,233 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:17:28,468 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:17:43,372 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:17:59,724 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:18:10,774 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:18:28,186 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:18:48,009 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:19:02,425 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:19:11,112 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:19:32,875 | WARN  | ew I/O worker #2 | PacketManager                    | 131 - org.onosproject.onos-core-net - 1.8.7 | Packet processor org.opencord.cordvtn.impl.CordVtnDhcpProxy$InternalPacketProcessor@64be463 threw an exception
-java.lang.IllegalArgumentException
-2017-06-13 13:19:41,974 | ERROR | er-event-handler | DependencyHandler                | 169 - org.opencord.vtn - 1.2.0.SNAPSHOT | Uncaught exception on DependencyHandler-event-handler
-java.lang.IllegalArgumentException
-2017-06-13 13:19:41,976 | ERROR | er-event-handler | ManagementInstanceHandler        | 169 - org.opencord.vtn - 1.2.0.SNAPSHOT | Uncaught exception on ManagementInstanceHandler-event-handler
-java.lang.IllegalArgumentException
-2017-06-13 13:19:41,979 | ERROR | er-event-handler | DefaultInstanceHandler           | 169 - org.opencord.vtn - 1.2.0.SNAPSHOT | Uncaught exception on DefaultInstanceHandler-event-handler
-java.lang.IllegalArgumentException
-2017-06-13 13:19:41,980 | ERROR | er-event-handler | AccessAgentInstanceHandler       | 169 - org.opencord.vtn - 1.2.0.SNAPSHOT | Uncaught exception on AccessAgentInstanceHandler-event-handler
-java.lang.IllegalArgumentException
-Exception:
-        at com.google.common.base.Preconditions.checkArgument(Preconditions.java:108)[44:com.google.guava:19.0.0]
-        at org.opencord.cordvtn.api.core.Instance.of(Instance.java:70)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.handler.AbstractInstanceHandler$InternalHostListener.handle(AbstractInstanceHandler.java:225)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at org.opencord.cordvtn.impl.handler.AbstractInstanceHandler$InternalHostListener.lambda$event$0(AbstractInstanceHandler.java:215)[169:org.opencord.vtn:1.2.0.SNAPSHOT]
-        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)[:1.8.0_131]
-        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)[:1.8.0_131]
-        at java.lang.Thread.run(Thread.java:748)[:1.8.0_131]
-2017-06-13 13:19:46,303 | INFO  | 2]-nio2-thread-3 | ServerSession                    | 30 - org.apache.sshd.core - 0.14.0 | Server session created from /172.19.0.4:60846
-2017-06-13 13:19:46,516 | INFO  | 2]-nio2-thread-8 | ServerUserAuthService            | 30 - org.apache.sshd.core - 0.14.0 | Session karaf@/172.19.0.4:60846 authenticated
-2017-06-13 13:19:46,521 | INFO  | 2]-nio2-thread-5 | ChannelSession                   | 30 - org.apache.sshd.core - 0.14.0 | Executing command: cat /root/onos/apache-karaf-3.0.5/data/log/karaf.log
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 368.428s
-
-OK
-
-
diff --git a/src/test/results/voltha/ponsim/test_5_subscribers_with_voltha_for_igmp_with_10_group_joins_verifying_traffic b/src/test/results/voltha/ponsim/test_5_subscribers_with_voltha_for_igmp_with_10_group_joins_verifying_traffic
deleted file mode 100644
index 2e88fae..0000000
--- a/src/test/results/voltha/ponsim/test_5_subscribers_with_voltha_for_igmp_with_10_group_joins_verifying_traffic
+++ /dev/null
@@ -1,1036 +0,0 @@
-cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 250ca5560fe5
-Checking operational status for device 250ca5560fe5
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 11:15:10] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 timed out
-Subscriber on port veth0 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth4 timed out
-Subscriber on port veth8 timed out
-Joining channel 0 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Joining channel 0 for subscriber port veth0
-Joining channel 0 for subscriber port veth4
-Joining channel 0 for subscriber port veth6
-Joining channel 0 for subscriber port veth8
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368644.708114096' |>>>>
-Packet received in 2800560.029 usecs for group 225.0.0.2 after join
-Subscriber on port veth10 timed out
-Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368645.212110908' |>>>>
-Packet received in 2984200.667 usecs for group 225.0.0.2 after join
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Subscriber on port veth10 timed out
-Joining channel 1 for subscriber port veth0
-Subscriber on port veth10 not received 0 packets
-Joining channel 1 for subscriber port veth10
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 1 for subscriber port veth4
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 not received 0 packets
-Joining channel 1 for subscriber port veth6
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 1 for subscriber port veth8
-Packet received for group 225.0.0.3, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='368660.304092959' |>>>>
-Packet received in 2924265.775 usecs for group 225.0.0.3 after join
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368664.752097126' |>>>>
-Subscriber on port veth4 not received 1 packets
-Exception in thread Thread-3:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368669.368119477' |>>>>
-Subscriber on port veth0 not received 1 packets
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 2 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 2 for subscriber port veth6
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 2 for subscriber port veth8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 3 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 3 for subscriber port veth6
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 3 for subscriber port veth8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 4 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 4 for subscriber port veth6
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 4 for subscriber port veth8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 5 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 5 for subscriber port veth6
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 5 for subscriber port veth8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='368740.052085483' |>>>>
-Packet received in 2893571.980 usecs for group 225.0.0.7 after join
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 6 for subscriber port veth8
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 6 for subscriber port veth10
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 not received 0 packets
-Joining channel 6 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth8 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='368763.352129464' |>>>>
-Subscriber on port veth8 not received 1 packets
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Exception in thread Thread-5:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 7 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 7 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 8 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 8 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 9 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 9 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Joining channel 0 for subscriber port veth10
-All subscribers have joined the channel
-Joining channel 0 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 1 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 1 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 2 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 2 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 3 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 3 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 4 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 4 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 5 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 5 for subscriber port veth6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 6 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 6 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 7 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 7 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 8 for subscriber port veth10
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 8 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 9 for subscriber port veth10
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 9 for subscriber port veth6
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 250ca5560fe5
-Deleting device 250ca5560fe5
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 5332, in test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 250ca5560fe5
-cordTester: INFO: Checking operational status for device 250ca5560fe5
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Joining channel 0 for subscriber port veth6
-cordTester: INFO: Joining channel 0 for subscriber port veth8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368644.708114096' |>>>>
-cordTester: DEBUG: Packet received in 2800560.029 usecs for group 225.0.0.2 after join
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368645.212110908' |>>>>
-cordTester: DEBUG: Packet received in 2984200.667 usecs for group 225.0.0.2 after join
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth0 not received 0 packets
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Joining channel 1 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth4 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth8
-cordTester: INFO: Packet received for group 225.0.0.3, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='368660.304092959' |>>>>
-cordTester: DEBUG: Packet received in 2924265.775 usecs for group 225.0.0.3 after join
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368664.752097126' |>>>>
-cordTester: INFO: Subscriber on port veth4 not received 1 packets
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='368669.368119477' |>>>>
-cordTester: INFO: Subscriber on port veth0 not received 1 packets
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='368740.052085483' |>>>>
-cordTester: DEBUG: Packet received in 2893571.980 usecs for group 225.0.0.7 after join
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth8
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='368763.352129464' |>>>>
-cordTester: INFO: Subscriber on port veth8 not received 1 packets
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Joining channel 0 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth10
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth6
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 250ca5560fe5
-cordTester: INFO: Deleting device 250ca5560fe5
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 493.518s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_5_subscriber_with_voltha_for_igmp_with_10_group_joins_verifying_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_9_subscribers_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic b/src/test/results/voltha/ponsim/test_9_subscribers_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic
deleted file mode 100644
index fa3d4c2..0000000
--- a/src/test/results/voltha/ponsim/test_9_subscribers_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic
+++ /dev/null
@@ -1,2042 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic']
-Running tests: ['voltha:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 845cebb73315
-Checking operational status for device 845cebb73315
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 11:26:32] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-Adding group 225.0.0.6
-Adding group 225.0.0.7
-Adding group 225.0.0.8
-Adding group 225.0.0.9
-Adding group 225.0.0.10
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth18 timed out
-Subscriber on port veth4 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth0 timed out
-Subscriber on port veth16 timed out
-Subscriber on port veth12 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth10 timed out
-Joining channel 0 for subscriber port veth18
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Joining channel 0 for subscriber port veth6
-Joining channel 0 for subscriber port veth8
-Joining channel 0 for subscriber port veth0
-Joining channel 0 for subscriber port veth12
-Joining channel 0 for subscriber port veth16
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-Joining channel 0 for subscriber port veth14
-Joining channel 0 for subscriber port veth10
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369327.272132113' |>>>>
-Packet received in 3138701.374 usecs for group 225.0.0.2 after join
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth18 timed out
-Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369328.060124559' |>>>>
-Packet received in 3014366.383 usecs for group 225.0.0.2 after join
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth12 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth16 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth14 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 1 for subscriber port veth4
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 1 for subscriber port veth18
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 1 for subscriber port veth0
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 1 for subscriber port veth12
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 timed out
-Subscriber on port veth16 timed out
-Packet received for group 225.0.0.3, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='369342.388105031' |>>>>
-Subscriber on port veth6 not received 0 packets
-Subscriber on port veth16 not received 0 packets
-Packet received in 3002938.600 usecs for group 225.0.0.3 after join
-Joining channel 1 for subscriber port veth6
-Joining channel 1 for subscriber port veth16
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 1 for subscriber port veth10
-Subscriber on port veth8 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth8 not received 0 packets
-Joining channel 1 for subscriber port veth14
-Joining channel 1 for subscriber port veth8
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth18 timed out
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369347.496137241' |>>>>
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth4 not received 1 packets
-Exception in thread Thread-3:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth0 timed out
-Subscriber on port veth12 timed out
-Subscriber on port veth16 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369353.156139199' |>>>>
-Subscriber on port veth0 not received 1 packets
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth18 timed out
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth18 not received 0 packets
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Joining channel 2 for subscriber port veth18
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 2 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth16 timed out
-Subscriber on port veth16 not received 0 packets
-Joining channel 2 for subscriber port veth16
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth10 not received 0 packets
-Subscriber on port veth6 not received 0 packets
-Subscriber on port veth14 timed out
-Joining channel 2 for subscriber port veth10
-Subscriber on port veth8 timed out
-Joining channel 2 for subscriber port veth6
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth8 not received 0 packets
-Joining channel 2 for subscriber port veth14
-Joining channel 2 for subscriber port veth8
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth16 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 3 for subscriber port veth18
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth12 not received 0 packets
-Joining channel 3 for subscriber port veth12
-Subscriber on port veth16 timed out
-Subscriber on port veth16 not received 0 packets
-Joining channel 3 for subscriber port veth16
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 3 for subscriber port veth6
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 3 for subscriber port veth14
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 3 for subscriber port veth10
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 3 for subscriber port veth8
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth16 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 4 for subscriber port veth18
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 4 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth16 timed out
-Subscriber on port veth16 not received 0 packets
-Joining channel 4 for subscriber port veth16
-Subscriber on port veth14 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth6 not received 0 packets
-Subscriber on port veth8 timed out
-Joining channel 4 for subscriber port veth14
-Joining channel 4 for subscriber port veth6
-Subscriber on port veth8 not received 0 packets
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 4 for subscriber port veth8
-Joining channel 4 for subscriber port veth10
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.6, channel 5
-All subscribers have joined the channel
-All subscribers have joined the channel
-Packet received for group 225.0.0.6, subscriber, port veth16 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cd src=1.2.3.4 dst=225.0.0.6 options=[] |<Raw  load='369407.180133922' |>>>>
-Packet received in 2843251.160 usecs for group 225.0.0.6 after join
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth8 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 5 for subscriber port veth18
-Subscriber on port veth16 timed out
-Subscriber on port veth16 not received 0 packets
-Joining channel 5 for subscriber port veth16
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 5 for subscriber port veth12
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 5 for subscriber port veth6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 5 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth8 timed out
-Joining channel 5 for subscriber port veth14
-Subscriber on port veth8 not received 0 packets
-Joining channel 5 for subscriber port veth8
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth16 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='369429.108105927' |>>>>
-Packet received in 2887614.511 usecs for group 225.0.0.7 after join
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth16 checking data traffic receiving from group 225.0.0.6, channel 5
-Packet received for group 225.0.0.6, subscriber, port veth16 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cd src=1.2.3.4 dst=225.0.0.6 options=[] |<Raw  load='369432.968136551' |>>>>
-Subscriber on port veth6 timed out
-Subscriber on port veth16 not received 1 packets
-Exception in thread Thread-9:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 6 for subscriber port veth18
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth8 timed out
-Subscriber on port veth8 not received 0 packets
-Joining channel 6 for subscriber port veth8
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 6 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 6 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 6 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.8, channel 7
-Joining channel 6 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth8 timed out
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='369453.512143964' |>>>>
-Subscriber on port veth8 not received 1 packets
-Exception in thread Thread-5:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/root/test/src/test/voltha/../utils/threadPool.py", line 40, in run
-    work.__call__()
-  File "/root/test/src/test/voltha/volthaTest.py", line 186, in pool_cb
-    self.test_status = cb(self.subscriber, multiple_sub = True)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1093, in igmp_flow_check
-    subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-  File "/root/test/src/test/voltha/volthaTest.py", line 128, in channel_receive
-    assert_equal(len(r), 0)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: 1 != 0
-
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 7 for subscriber port veth18
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 7 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 7 for subscriber port veth6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 7 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 7 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 8 for subscriber port veth18
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 8 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 8 for subscriber port veth6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 8 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 8 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.10, channel 9
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-All subscribers have joined the channel
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 9 for subscriber port veth18
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 9 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 9 for subscriber port veth6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 9 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 9 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth12 timed out
-Joining channel 0 for subscriber port veth18
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-All subscribers have joined the channel
-Joining channel 0 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-Joining channel 0 for subscriber port veth6
-Joining channel 0 for subscriber port veth10
-Joining channel 0 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 1 for subscriber port veth18
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth12 timed out
-All subscribers have joined the channel
-Subscriber on port veth12 not received 0 packets
-Joining channel 1 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth6 not received 0 packets
-Subscriber on port veth10 not received 0 packets
-Joining channel 1 for subscriber port veth6
-Joining channel 1 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 1 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 2 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 2 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 2 for subscriber port veth10
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 2 for subscriber port veth6
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 2 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 3 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 3 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 3 for subscriber port veth10
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 3 for subscriber port veth6
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 3 for subscriber port veth14
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 4 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-All subscribers have joined the channel
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 4 for subscriber port veth12
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 4 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth14 not received 0 packets
-Subscriber on port veth6 not received 0 packets
-Joining channel 4 for subscriber port veth14
-Joining channel 4 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 5 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 5 for subscriber port veth12
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 5 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 5 for subscriber port veth14
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 5 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth10 timed out
-Subscriber on port veth14 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 6 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 6 for subscriber port veth12
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 6 for subscriber port veth10
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 6 for subscriber port veth14
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 6 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth14 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 7 for subscriber port veth18
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 7 for subscriber port veth12
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 7 for subscriber port veth14
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 7 for subscriber port veth10
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 7 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth18 timed out
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth14 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 8 for subscriber port veth18
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 8 for subscriber port veth12
-All subscribers have joined the channel
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 8 for subscriber port veth14
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 8 for subscriber port veth10
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 8 for subscriber port veth6
-All subscribers have joined the channel
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.10, channel 9
-All subscribers have joined the channel
-All subscribers have joined the channel
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth18 timed out
-All subscribers have joined the channel
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-Subscriber on port veth12 timed out
-Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth14 timed out
-Subscriber on port veth10 timed out
-Subscriber on port veth6 timed out
-Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth18 timed out
-Subscriber on port veth18 not received 0 packets
-Joining channel 9 for subscriber port veth18
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-Subscriber on port veth12 timed out
-Subscriber on port veth12 not received 0 packets
-Joining channel 9 for subscriber port veth12
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth14 timed out
-Subscriber on port veth14 not received 0 packets
-Joining channel 9 for subscriber port veth14
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth10 timed out
-Subscriber on port veth10 not received 0 packets
-Joining channel 9 for subscriber port veth10
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth6 timed out
-Subscriber on port veth6 not received 0 packets
-Joining channel 9 for subscriber port veth6
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 845cebb73315
-Deleting device 845cebb73315
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 5351, in test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 845cebb73315
-cordTester: INFO: Checking operational status for device 845cebb73315
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.2
-cordTester: DEBUG: Adding group 225.0.0.3
-cordTester: DEBUG: Adding group 225.0.0.4
-cordTester: DEBUG: Adding group 225.0.0.5
-cordTester: DEBUG: Adding group 225.0.0.6
-cordTester: DEBUG: Adding group 225.0.0.7
-cordTester: DEBUG: Adding group 225.0.0.8
-cordTester: DEBUG: Adding group 225.0.0.9
-cordTester: DEBUG: Adding group 225.0.0.10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth18
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Joining channel 0 for subscriber port veth6
-cordTester: INFO: Joining channel 0 for subscriber port veth8
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Joining channel 0 for subscriber port veth12
-cordTester: INFO: Joining channel 0 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Joining channel 0 for subscriber port veth14
-cordTester: INFO: Joining channel 0 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369327.272132113' |>>>>
-cordTester: DEBUG: Packet received in 3138701.374 usecs for group 225.0.0.2 after join
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369328.060124559' |>>>>
-cordTester: DEBUG: Packet received in 3014366.383 usecs for group 225.0.0.2 after join
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth4 not received 0 packets
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 1 for subscriber port veth4
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth0 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth12
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Packet received for group 225.0.0.3, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='369342.388105031' |>>>>
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: DEBUG: Packet received in 3002938.600 usecs for group 225.0.0.3 after join
-cordTester: INFO: Subscriber on port veth16 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth6
-cordTester: INFO: Joining channel 1 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth8
-cordTester: INFO: Joining channel 1 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth4 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369347.496137241' |>>>>
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth4 not received 1 packets
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Packet received for group 225.0.0.2, subscriber, port veth0 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='369353.156139199' |>>>>
-cordTester: INFO: Subscriber on port veth0 not received 1 packets
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Joining channel 2 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth16 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Joining channel 2 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Joining channel 2 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth14
-cordTester: INFO: Joining channel 2 for subscriber port veth8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth16 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth16 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Joining channel 4 for subscriber port veth14
-cordTester: INFO: Joining channel 4 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth8
-cordTester: INFO: Joining channel 4 for subscriber port veth10
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Packet received for group 225.0.0.6, subscriber, port veth16 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cd src=1.2.3.4 dst=225.0.0.6 options=[] |<Raw  load='369407.180133922' |>>>>
-cordTester: DEBUG: Packet received in 2843251.160 usecs for group 225.0.0.6 after join
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: Subscriber on port veth16 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth16
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Joining channel 5 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth16 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='369429.108105927' |>>>>
-cordTester: DEBUG: Packet received in 2887614.511 usecs for group 225.0.0.7 after join
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth16 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Packet received for group 225.0.0.6, subscriber, port veth16 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cd src=1.2.3.4 dst=225.0.0.6 options=[] |<Raw  load='369432.968136551' |>>>>
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth16 not received 1 packets
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth8 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth8 timed out
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth8 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Packet received for group 225.0.0.7, subscriber, port veth8 and from source ip 1.2.3.4 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cc src=1.2.3.4 dst=225.0.0.7 options=[] |<Raw  load='369453.512143964' |>>>>
-cordTester: INFO: Subscriber on port veth8 not received 1 packets
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Joining channel 0 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Joining channel 0 for subscriber port veth6
-cordTester: INFO: Joining channel 0 for subscriber port veth10
-cordTester: INFO: Joining channel 0 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth6
-cordTester: INFO: Joining channel 1 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 1 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.2, channel 1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 2 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.3, channel 2
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth6
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 3 for subscriber port veth14
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.4, channel 3
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth12
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 4 for subscriber port veth14
-cordTester: INFO: Joining channel 4 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.5, channel 4
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth12
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 5 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.6, channel 5
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth12
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 6 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.7, channel 6
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth12
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 7 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth18
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.8, channel 7
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth12
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth14
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth10
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 8 for subscriber port veth6
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.10, channel 9
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth18 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth12 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth18 timed out
-cordTester: INFO: Subscriber on port veth18 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth18
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth14 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth10 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth6 checking data traffic receiving from group 225.0.0.9, channel 8
-cordTester: INFO: Subscriber on port veth12 timed out
-cordTester: INFO: Subscriber on port veth12 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth12
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth14 timed out
-cordTester: INFO: Subscriber on port veth14 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth14
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth10 timed out
-cordTester: INFO: Subscriber on port veth10 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth10
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth6 timed out
-cordTester: INFO: Subscriber on port veth6 not received 0 packets
-cordTester: INFO: Joining channel 9 for subscriber port veth6
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 845cebb73315
-cordTester: INFO: Deleting device 845cebb73315
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 491.050s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_9_subscriber_with_voltha_for_igmp_with_10_group_joins_and_verify_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication
deleted file mode 100644
index f429c51..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication
+++ /dev/null
@@ -1,90 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device a262ef102041
-Checking operational status for device a262ef102041
-Installing OLT app /root/test/src/test/voltha/../apps/olt-app-2.0-SNAPSHOT.oar
-Adding subscribers through OLT app
-Running subscriber veth0 tls auth test with valid TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-Interface veth0. Received data in change cipher spec function is None
-Verifying TLS Change Cipher spec record type 14 over interface veth0
-Handshake finished. Sending empty data over interface veth0
-Server authentication successfull over interface veth0
-Disabling device a262ef102041
-Deleting device a262ef102041
-Uninstalling OLT app /root/test/src/test/voltha/../apps/olt-app-2.0-SNAPSHOT.oar
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 80.582s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_failure b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_failure
deleted file mode 100644
index 2165b0a..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_failure
+++ /dev/null
@@ -1,88 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim_olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 30f27bd183f0
-Checking operational status for device 30f27bd183f0
-Installing OLT app
-Adding subscribers through OLT app
-Running subscriber veth0 tls auth test with no TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 605 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Disabling device 30f27bd183f0
-Deleting device 30f27bd183f0
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 95.043s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_failure Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert
deleted file mode 100644
index c426364..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert
+++ /dev/null
@@ -1,87 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim_olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device e7a62789e9eb
-Checking operational status for device e7a62789e9eb
-Installing OLT app
-Adding subscribers through OLT app
-Running subscriber veth0 tls auth test with invalid TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Disabling device e7a62789e9eb
-Deleting device e7a62789e9eb
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 95.067s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation
deleted file mode 100644
index a62718d..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation
+++ /dev/null
@@ -1,89 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim_olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device c4072073c4ce
-Checking operational status for device c4072073c4ce
-Installing OLT app
-Adding subscribers through OLT app
-Running subscriber veth0 tls auth test with app_deactivate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-Restart aaa app in onos during tls auth flow check on voltha
-entering into testFail function for interface veth0
-TLS verification failed
-Disabling device c4072073c4ce
-Deleting device c4072073c4ce
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 95.010s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_and_leave_for_one_group_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_and_leave_for_one_group_verifying_traffic
deleted file mode 100644
index bdd7048..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_and_leave_for_one_group_verifying_traffic
+++ /dev/null
@@ -1,181 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 6d94a5a49527
-Checking operational status for device 6d94a5a49527
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28312.012086531' |>>>>
-Packet received in 2724053.106 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28325.204115582' |>>>>
-Packet received in 6089229.310 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28344.748141759' |>>>>
-Packet received in 7441608.994 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cf src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28379.132114277' |>>>>
-Packet received in 2630788.196 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28393.892107487' |>>>>
-Packet received in 2659734.116 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28403.576125853' |>>>>
-Packet received in 2629052.085 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28418.256111493' |>>>>
-Packet received in 2563747.333 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Packet received for group 225.0.0.4, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.4 options=[] |<Raw  load='28433.020127298' |>>>>
-Packet received in 2638333.265 usecs for group 225.0.0.4 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28447.68811653' |>>>>
-Packet received in 2578036.697 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28462.456127131' |>>>>
-Packet received in 2638852.107 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28472.036110591' |>>>>
-Packet received in 2618532.062 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28486.82808718' |>>>>
-Packet received in 2574205.690 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Packet received for group 225.0.0.4, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.4 options=[] |<Raw  load='28501.540146584' |>>>>
-Packet received in 2633957.728 usecs for group 225.0.0.4 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28516.30416742' |>>>>
-Packet received in 2625209.951 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 6d94a5a49527
-Deleting device 6d94a5a49527
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 303.426s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
deleted file mode 100644
index bdd7048..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
+++ /dev/null
@@ -1,181 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 6d94a5a49527
-Checking operational status for device 6d94a5a49527
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.2
-Adding group 225.0.0.3
-Adding group 225.0.0.4
-Adding group 225.0.0.5
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28312.012086531' |>>>>
-Packet received in 2724053.106 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28325.204115582' |>>>>
-Packet received in 6089229.310 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28344.748141759' |>>>>
-Packet received in 7441608.994 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95cf src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28379.132114277' |>>>>
-Packet received in 2630788.196 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28393.892107487' |>>>>
-Packet received in 2659734.116 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28403.576125853' |>>>>
-Packet received in 2629052.085 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d1 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28418.256111493' |>>>>
-Packet received in 2563747.333 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Packet received for group 225.0.0.4, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.4 options=[] |<Raw  load='28433.020127298' |>>>>
-Packet received in 2638333.265 usecs for group 225.0.0.4 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28447.68811653' |>>>>
-Packet received in 2578036.697 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='28462.456127131' |>>>>
-Packet received in 2638852.107 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Packet received for group 225.0.0.2, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.2 options=[] |<Raw  load='28472.036110591' |>>>>
-Packet received in 2618532.062 usecs for group 225.0.0.2 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Packet received for group 225.0.0.3, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d2 src=1.2.3.4 dst=225.0.0.3 options=[] |<Raw  load='28486.82808718' |>>>>
-Packet received in 2574205.690 usecs for group 225.0.0.3 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.2, channel 1
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Packet received for group 225.0.0.4, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.4 options=[] |<Raw  load='28501.540146584' |>>>>
-Packet received in 2633957.728 usecs for group 225.0.0.4 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.3, channel 2
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.5, channel 4
-Packet received for group 225.0.0.5, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d0 src=1.2.3.4 dst=225.0.0.5 options=[] |<Raw  load='28516.30416742' |>>>>
-Packet received in 2625209.951 usecs for group 225.0.0.5 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.4, channel 3
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 6d94a5a49527
-Deleting device 6d94a5a49527
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 303.426s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_5_groups_joins_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic
deleted file mode 100644
index b19506e..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic
+++ /dev/null
@@ -1,102 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device a3a80084a23c
-Checking operational status for device a3a80084a23c
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5 and waited till GMI timer expires
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='292669.492156291' |>>>>
-Packet received in 292669543169.052 usecs for group 225.0.0.1 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='292669.592150434' |>>>>
-Packet received in 292669635200.946 usecs for group 225.0.0.1 after join
-Again include the channel 0 on port veth0 with souce list ip 2.3.4.5
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='292674.668161033' |>>>>
-Packet received in 292674725007.539 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='292674.688138242' |>>>>
-Packet received in 292674755443.746 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='292674.720142024' |>>>>
-Packet received in 292674757166.690 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='292674.736150438' |>>>>
-Packet received in 292674801872.757 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='292674.752122981' |>>>>
-Packet received in 292674803573.924 usecs for group 225.0.0.1 after join
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device a3a80084a23c
-Deleting device a3a80084a23c
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 163.514s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic
deleted file mode 100644
index 297efaa..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic
+++ /dev/null
@@ -1,158 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 7c2303617024
-Checking operational status for device 7c2303617024
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5 and waited till GMI timer expires
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='291999.028163464' |>>>>
-Packet received in 291999067076.457 usecs for group 225.0.0.1 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291999.096106458' |>>>>
-Packet received in 291999143102.993 usecs for group 225.0.0.1 after join
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 7c2303617024
-Deleting device 7c2303617024
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4784, in test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1425, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 7c2303617024
-cordTester: INFO: Checking operational status for device 7c2303617024
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='291999.028163464' |>>>>
-cordTester: DEBUG: Packet received in 291999067076.457 usecs for group 225.0.0.1 after join
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291999.096106458' |>>>>
-cordTester: DEBUG: Packet received in 291999143102.993 usecs for group 225.0.0.1 after join
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 7c2303617024
-cordTester: INFO: Deleting device 7c2303617024
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 158.464s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic
deleted file mode 100644
index fcd447b..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic
+++ /dev/null
@@ -1,158 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 4c33ab2e2e5c
-Checking operational status for device 4c33ab2e2e5c
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='290657.312138207' |>>>>
-Packet received in 290657352860.465 usecs for group 225.0.0.1 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='290657.392157301' |>>>>
-Packet received in 290657438426.612 usecs for group 225.0.0.1 after join
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 4c33ab2e2e5c
-Deleting device 4c33ab2e2e5c
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4751, in test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1437, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 4c33ab2e2e5c
-cordTester: INFO: Checking operational status for device 4c33ab2e2e5c
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='290657.312138207' |>>>>
-cordTester: DEBUG: Packet received in 290657352860.465 usecs for group 225.0.0.1 after join
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='290657.392157301' |>>>>
-cordTester: DEBUG: Packet received in 290657438426.612 usecs for group 225.0.0.1 after join
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 4c33ab2e2e5c
-cordTester: INFO: Deleting device 4c33ab2e2e5c
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 158.752s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic
deleted file mode 100644
index 2bfc958..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic
+++ /dev/null
@@ -1,102 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 93b691d51a9b
-Checking operational status for device 93b691d51a9b
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0 from specific source address 2.3.4.5 and waited till GMI timer expires
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291376.636122663' |>>>>
-Packet received in 291376667927.526 usecs for group 225.0.0.1 after join
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291376.708121714' |>>>>
-Packet received in 291376755533.492 usecs for group 225.0.0.1 after join
-Again include the channel 0 on port veth0 with souce list ip 2.3.4.5
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='291381.820124179' |>>>>
-Packet received in 291381868686.292 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291381.840171074' |>>>>
-Packet received in 291381891514.251 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='291381.864159904' |>>>>
-Packet received in 291381911265.678 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 2.3.4.5 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='291381.888154967' |>>>>
-Packet received in 291381931143.231 usecs for group 225.0.0.1 after join
-Packet received for group 225.0.0.1, subscriber, port veth0 and from source ip 3.4.5.6 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='291381.908157131' |>>>>
-Packet received in 291381955066.951 usecs for group 225.0.0.1 after join
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 93b691d51a9b
-Deleting device 93b691d51a9b
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 163.767s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic
deleted file mode 100644
index 08f7f65..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic
+++ /dev/null
@@ -1,85 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 4e19bf53124a
-Checking operational status for device 4e19bf53124a
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber not receive data from channel 0 on any specific source veth0
-Leaving channel 0 for subscriber on port veth0
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 4e19bf53124a
-Deleting device 4e19bf53124a
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 96.090s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic
deleted file mode 100644
index e034a33..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic
+++ /dev/null
@@ -1,135 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -s -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 7fa80fefa6fa
-Checking operational status for device 7fa80fefa6fa
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d0 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='275477.644103217' |>>>>
-Packet received in 2640114.813 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x93d1 src=2.3.4.5 dst=225.0.0.1 options=[] |<Raw  load='275483.25213537' |>>>>
-Packet received in 275483311267.105 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275488.868162173' |>>>>
-Packet received in 275488915243.224 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275494.452175179' |>>>>
-Packet received in 275494495719.541 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91cf src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275500.02815333' |>>>>
-Packet received in 275500079363.157 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275505.608123689' |>>>>
-Packet received in 275505643135.411 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275511.172117384' |>>>>
-Packet received in 275511206921.443 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275516.732175794' |>>>>
-Packet received in 275516783518.311 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275522.332183166' |>>>>
-Packet received in 275522379532.744 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275527.948172373' |>>>>
-Packet received in 275527975401.304 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=36 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x91ce src=3.4.5.6 dst=225.0.0.1 options=[] |<Raw  load='275533.500136833' |>>>>
-Packet received in 275533547406.212 usecs for group 225.0.0.1 after join
-Joining channel 0 for subscriber port veth0
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 7fa80fefa6fa
-Deleting device 7fa80fefa6fa
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 151.525s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_verifying_traffic
deleted file mode 100644
index bf55690..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_join_verifying_traffic
+++ /dev/null
@@ -1,119 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 8da52620682f
-Checking operational status for device 8da52620682f
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='7973.288140714' |>>>>
-Packet received in 7973315431.274 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='7978.844143899' |>>>>
-Packet received in 7978867880.085 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='7984.384124229' |>>>>
-Packet received in 7984419702.470 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='7989.940148947' |>>>>
-Packet received in 7989975748.276 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='7995.500134472' |>>>>
-Packet received in 7995535542.973 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=33 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d5 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='8001.07616022' |>>>>
-Packet received in 8001107714.744 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='8006.624133829' |>>>>
-Packet received in 8006655266.652 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='8012.176143589' |>>>>
-Packet received in 8012203422.375 usecs for group 225.0.0.1 after join
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=34 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d4 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='8017.732158738' |>>>>
-Packet received in 8017763287.505 usecs for group 225.0.0.1 after join
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 8da52620682f
-Deleting device 8da52620682f
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 148.734s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_join_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic
deleted file mode 100644
index ec4219a..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic
+++ /dev/null
@@ -1,160 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device c1f51fcbbb52
-Checking operational status for device c1f51fcbbb52
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18131.072142934' |>>>>
-Packet received in 18131097266.764 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18150.752129078' |>>>>
-Packet received in 18150783535.370 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18170.416134979' |>>>>
-Packet received in 18170443306.484 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18190.100107566' |>>>>
-Packet received in 18190127273.255 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18209.772120511' |>>>>
-Packet received in 18209803539.620 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18229.484139983' |>>>>
-Packet received in 18229523601.525 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18249.180133854' |>>>>
-Packet received in 18249203360.848 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18268.868137417' |>>>>
-Packet received in 18268916136.375 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18288.572102305' |>>>>
-Packet received in 18288611507.083 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device c1f51fcbbb52
-Deleting device c1f51fcbbb52
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 289.953s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_verifying_traffic b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_verifying_traffic
deleted file mode 100644
index 8bdb49e..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_igmp_leave_verifying_traffic
+++ /dev/null
@@ -1,160 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device c1f51fcbbb52
-Checking operational status for device c1f51fcbbb52
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18131.072142934' |>>>>
-Packet received in 18131097266.764 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18150.752129078' |>>>>
-Packet received in 18150783535.370 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18170.416134979' |>>>>
-Packet received in 18170443306.484 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18190.100107566' |>>>>
-Packet received in 18190127273.255 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18209.772120511' |>>>>
-Packet received in 18209803539.620 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18229.484139983' |>>>>
-Packet received in 18229523601.525 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18249.180133854' |>>>>
-Packet received in 18249203360.848 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18268.868137417' |>>>>
-Packet received in 18268916136.375 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Packet received for group 225.0.0.1, subscriber, port veth0 showing full packet <bound method Ether.show of <Ether  dst=01:00:5e:00:01:01 src=02:88:b4:e4:90:77 type=0x800 |<IP  version=4L ihl=5L tos=0x0 len=35 id=1 flags= frag=0L ttl=64 proto=hopopt chksum=0x95d3 src=1.2.3.4 dst=225.0.0.1 options=[] |<Raw  load='18288.572102305' |>>>>
-Packet received in 18288611507.083 usecs for group 225.0.0.1 after join
-Leaving channel 0 for subscriber on port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device c1f51fcbbb52
-Deleting device c1f51fcbbb52
-Uninstalling OLT app
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 289.953s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_igmp_leave_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts b/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts
deleted file mode 100644
index 43ddc0a..0000000
--- a/src/test/results/voltha/ponsim/test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts
+++ /dev/null
@@ -1,151 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_kafka_1 is up-to-date
-compose_registrator_1 is up-to-date
-IP 172.18.0.3 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.2 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts']
-Running tests: ['voltha:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim_olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 578984f1d484
-Checking operational status for device 578984f1d484
-Installing OLT app
-Adding subscribers through OLT app
-Running subscriber veth0 tls auth test with invalid TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Running subscriber veth0 tls auth test with invalid TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Running subscriber veth0 tls auth test with no TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 605 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Running subscriber veth0 tls auth test with invalid TLS certificate
-llheader packet is None
-source mac of  packet is 4efe788ced55
-_eapStart method started over interface veth0
-Inside EAP ID Req for interface veth0
-Got EAPOL packet with type id and code request for interface veth0
-Interface: veth0, Packet code: 1, type: 1, id: 0
-Send EAP Response with identity raduser over interface veth0
-Got hello request for id 1 over interface veth0
-Sending Client Hello TLS payload of len 50, id 1 over interface veth0
-Receiving server certificates over interface veth0
-Interface veth0, Appending packet type 02 to packet history of len 74
-Interface: veth0, Pending bytes left 1306
-Interface veth0, Appending packet type 0b to packet history of len 2216
-Interface veth0, Appending packet type 0d to packet history of len 154
-server hello received over interface veth0
-Interface veth0, Appending packet type 0e to packet history of len 4
-Sending client certificate request over interface veth0
-Sending Client Hello TLS Certificate payload of len 1569 over interface veth0
-entering into testFail function for interface veth0
-TLS verification failed
-Disabling device 578984f1d484
-Deleting device 578984f1d484
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 127.708s
-
-OK
-Test volthaTest.py:voltha_exchange.test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic b/src/test/results/voltha/ponsim/test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic
deleted file mode 100644
index df9e8b4..0000000
--- a/src/test/results/voltha/ponsim/test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic
+++ /dev/null
@@ -1,154 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 3b2dc68f8a3e
-Checking operational status for device 3b2dc68f8a3e
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber not receive data from channel 0 on any specific source veth0
-Send join to multicast group with exclude empty source list and waited till GMI timer expires
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 3b2dc68f8a3e
-Deleting device 3b2dc68f8a3e
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4870, in test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 3b2dc68f8a3e
-cordTester: INFO: Checking operational status for device 3b2dc68f8a3e
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber not receive data from channel 0 on any specific source veth0
-cordTester: INFO: Send join to multicast group with exclude empty source list and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 3b2dc68f8a3e
-cordTester: INFO: Deleting device 3b2dc68f8a3e
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 162.502s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic
deleted file mode 100644
index a666bfb..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic
+++ /dev/null
@@ -1,164 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device e147e595aa6e
-Checking operational status for device e147e595aa6e
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 09:57:54] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4
-Disabling device e147e595aa6e
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Leaving channel 0 for subscriber on port veth4
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device e147e595aa6e
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Igmp flow check expected to fail during olt device is disabled, so ignored test_status of this test
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 121.717s
-
-OK
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic Success
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic
deleted file mode 100644
index 8b437b0..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic
+++ /dev/null
@@ -1,217 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device d4b037ef8723
-Checking operational status for device d4b037ef8723
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device d4b037ef8723
-Deleting device d4b037ef8723
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4916, in test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device d4b037ef8723
-cordTester: INFO: Checking operational status for device d4b037ef8723
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device d4b037ef8723
-cordTester: INFO: Deleting device d4b037ef8723
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 333.391s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic
deleted file mode 100644
index 2a22b20..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic
+++ /dev/null
@@ -1,320 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 47635be6606f
-Checking operational status for device 47635be6606f
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth4 timed out
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth0 timed out
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth4 timed out
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 47635be6606f
-Deleting device 47635be6606f
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4892, in test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 47635be6606f
-cordTester: INFO: Checking operational status for device 47635be6606f
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 47635be6606f
-cordTester: INFO: Deleting device 47635be6606f
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 253.336s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic
deleted file mode 100644
index 75eb3f4..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic
+++ /dev/null
@@ -1,216 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 53470ce3065b
-Checking operational status for device 53470ce3065b
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 53470ce3065b
-Deleting device 53470ce3065b
-Uninstalling OLT app
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/root/test/src/test/voltha/volthaTest.py", line 4942, in test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic
-    num_channels = num_channels)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-AssertionError: False != True
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 53470ce3065b
-cordTester: INFO: Checking operational status for device 53470ce3065b
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Leaving channel 0 for subscriber on port veth4 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Leaving channel 0 for subscriber on port veth0 from specific source address 1.2.3.4 and waited till GMI timer expires
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 53470ce3065b
-cordTester: INFO: Deleting device 53470ce3065b
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 330.853s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic Failure
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic
deleted file mode 100644
index 1909e8a..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic
+++ /dev/null
@@ -1,233 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 821c0b918046
-Checking operational status for device 821c0b918046
-Installing OLT app
-Adding subscribers through OLT app
-Disabling device 821c0b918046
-172.17.0.4 - - [01/Aug/2017 10:21:35] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Disabling device 821c0b918046
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Disabling device 821c0b918046
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Disabling device 821c0b918046
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth4 timed out
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 821c0b918046
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Igmp flow check expected to fail if olt device is disabled, so ignored test_status of this test
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 251.048s
-
-OK
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic Success
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic
deleted file mode 100644
index bd32357..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic
+++ /dev/null
@@ -1,398 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 9d3fca92ec26
-Checking operational status for device 9d3fca92ec26
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 10:39:57] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Restarting olt or onu device 9d3fca92ec26
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Restarting olt or onu device 9d3fca92ec26
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Restarting olt or onu device 9d3fca92ec26
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-Restarting olt or onu device 9d3fca92ec26
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 9d3fca92ec26
-Deleting device 9d3fca92ec26
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Unhandled Error
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 57, in <lambda>
-    installSignalHandlers=False))
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1192, in run
-    self.mainLoop()
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1201, in mainLoop
-    self.runUntilCurrent()
---- <exception caught here> ---
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 824, in runUntilCurrent
-    call.func(*call.args, **call.kw)
-  File "/root/test/src/test/voltha/volthaTest.py", line 5305, in igmp_flow_check_operating_olt_admin_restart
-    assert_equal(self.success, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-exceptions.AssertionError: False != True
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 165, in wrapper
-    % timeout)
-TimeExpired: timeout expired before end of test (300.000000 s.)
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 9d3fca92ec26
-cordTester: INFO: Checking operational status for device 9d3fca92ec26
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Restarting olt or onu device 9d3fca92ec26
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 9d3fca92ec26
-cordTester: INFO: Deleting device 9d3fca92ec26
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 324.012s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic
deleted file mode 100644
index f142bf8..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic
+++ /dev/null
@@ -1,408 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 005bd47606cb
-Checking operational status for device 005bd47606cb
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 10:27:59] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-Admin state of uni_port is down
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Admin state of uni_port is up now
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-Admin state of uni_port is down
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Admin state of uni_port is up now
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-Admin state of uni_port is down
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-Admin state of uni_port is up now
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 005bd47606cb
-Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-Admin state of uni_port is down
-Deleting device 005bd47606cb
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Admin state of uni_port is up now
-Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-Admin state of uni_port is down
-Admin state of uni_port is up now
-Unhandled Error
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 57, in <lambda>
-    installSignalHandlers=False))
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1192, in run
-    self.mainLoop()
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1201, in mainLoop
-    self.runUntilCurrent()
---- <exception caught here> ---
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 824, in runUntilCurrent
-    call.func(*call.args, **call.kw)
-  File "/root/test/src/test/voltha/volthaTest.py", line 5256, in igmp_flow_check_operating_onu_admin_state
-    assert_equal(self.success, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-exceptions.AssertionError: False != True
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 165, in wrapper
-    % timeout)
-TimeExpired: timeout expired before end of test (500.000000 s.)
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 005bd47606cb
-cordTester: INFO: Checking operational status for device 005bd47606cb
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Admin state of uni_port is up now
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Admin state of uni_port is up now
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Admin state of uni_port is up now
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device 005bd47606cb
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: Deleting device 005bd47606cb
-cordTester: INFO: Uninstalling OLT app
-cordTester: INFO: Admin state of uni_port is up now
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: Admin state of uni_port is up now
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 524.042s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic
deleted file mode 100644
index 2643606..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic
+++ /dev/null
@@ -1,165 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device aac23db64e1d
-Checking operational status for device aac23db64e1d
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 10:02:00] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Restarting olt or onu device aac23db64e1d
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device aac23db64e1d
-Deleting device aac23db64e1d
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Igmp flow check expected to fail during olt device is paused, so ignored test_status of this test
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 135.128s
-
-OK
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic Success
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic
deleted file mode 100644
index 7b9aa08..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic
+++ /dev/null
@@ -1,381 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ vi ../voltha/volthaTest.py
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_zookeeper_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_consul_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device e75d026f30b5
-Checking operational status for device e75d026f30b5
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 10:10:42] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth0 timed out
-Restarting olt or onu device e75d026f30b5
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device e75d026f30b5
-Deleting device e75d026f30b5
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Unhandled Error
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 57, in <lambda>
-    installSignalHandlers=False))
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1192, in run
-    self.mainLoop()
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1201, in mainLoop
-    self.runUntilCurrent()
---- <exception caught here> ---
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 824, in runUntilCurrent
-    call.func(*call.args, **call.kw)
-  File "/root/test/src/test/voltha/volthaTest.py", line 5157, in igmp_flow_check_operating_olt_admin_restart
-    assert_equal(self.success, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-exceptions.AssertionError: False != True
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 165, in wrapper
-    % timeout)
-TimeExpired: timeout expired before end of test (300.000000 s.)
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are True and 200
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device e75d026f30b5
-cordTester: INFO: Checking operational status for device e75d026f30b5
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Restarting olt or onu device e75d026f30b5
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  128
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  129
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  130
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  131
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  132
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  133
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  134
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  135
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  136
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  137
-cordTester: INFO: Deleted subscriber for device of:0000000000000001 on port  138
-cordTester: INFO: Disabling device e75d026f30b5
-cordTester: INFO: Deleting device e75d026f30b5
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 324.048s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic
deleted file mode 100644
index 5eabf09..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic
+++ /dev/null
@@ -1,382 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-Creating network "compose_default" with driver "bridge"
-Creating network "compose_ponmgmt" with driver "bridge"
-Creating compose_zookeeper_1
-Creating compose_consul_1
-Creating compose_fluentd_1
-Creating compose_kafka_1
-Creating compose_registrator_1
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic'] tests across 1 containers in parallel
-Starting test container cord-tester1, image cordtest/nose, tag candidate
-Provisioning the ports for the test container
-
-Running PIPEWORK cmd: pipework pon1_128 -i veth0 -l l1 cord-tester1 192.168.100.1/24
-Running PIPEWORK cmd: pipework ponmgmt -i veth2 -l l2 cord-tester1 192.168.100.2/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth4 -l l3 cord-tester1 192.168.100.3/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth6 -l l4 cord-tester1 192.168.100.4/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth8 -l l5 cord-tester1 192.168.100.5/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth10 -l l6 cord-tester1 192.168.100.6/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth12 -l l7 cord-tester1 192.168.100.7/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth14 -l l8 cord-tester1 192.168.100.8/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth16 -l l9 cord-tester1 192.168.100.9/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth18 -l l10 cord-tester1 192.168.100.10/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth20 -l l11 cord-tester1 192.168.100.11/24
-Running PIPEWORK cmd: pipework pon1_128 -i veth22 -l l12 cord-tester1 192.168.100.12/24
-Running PIPEWORK cmd: pipework pon1_129 -i veth24 -l l13 cord-tester1 192.168.100.13/24
-Running PIPEWORK cmd: pipework pon1_130 -i veth26 -l l14 cord-tester1 192.168.100.14/24
-Running PIPEWORK cmd: pipework pon1_131 -i veth28 -l l15 cord-tester1 192.168.100.15/24
-Running PIPEWORK cmd: pipework pon1_132 -i veth30 -l l16 cord-tester1 192.168.100.16/24
-Running PIPEWORK cmd: pipework pon1_133 -i veth32 -l l17 cord-tester1 192.168.100.17/24
-Running PIPEWORK cmd: pipework pon1_134 -i veth34 -l l18 cord-tester1 192.168.100.18/24
-Running PIPEWORK cmd: pipework pon1_135 -i veth36 -l l19 cord-tester1 192.168.100.19/24
-Running PIPEWORK cmd: pipework pon1_136 -i veth38 -l l20 cord-tester1 192.168.100.20/24
-Running PIPEWORK cmd: pipework pon1_137 -i veth40 -l l21 cord-tester1 192.168.100.21/24
-Running PIPEWORK cmd: pipework pon1_138 -i veth42 -l l22 cord-tester1 192.168.100.22/24
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 340b50adba61
-Checking operational status for device 340b50adba61
-Installing OLT app
-Adding subscribers through OLT app
-172.17.0.4 - - [01/Aug/2017 09:49:10] "POST /RPC2 HTTP/1.1" 200 -
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha
-Admin state of uni_port is down
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Admin state of uni_port is up now
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Joining channel 0 for subscriber port veth0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-All subscribers have joined the channel
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth0 timed out
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Error deleting subscriber for device of:0000000000000001 on port 128
-Error deleting subscriber for device of:0000000000000001 on port 129
-Error deleting subscriber for device of:0000000000000001 on port 130
-Error deleting subscriber for device of:0000000000000001 on port 131
-Error deleting subscriber for device of:0000000000000001 on port 132
-Error deleting subscriber for device of:0000000000000001 on port 133
-Error deleting subscriber for device of:0000000000000001 on port 134
-Error deleting subscriber for device of:0000000000000001 on port 135
-Error deleting subscriber for device of:0000000000000001 on port 136
-Error deleting subscriber for device of:0000000000000001 on port 137
-Error deleting subscriber for device of:0000000000000001 on port 138
-Disabling device 340b50adba61
-Deleting device 340b50adba61
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Unhandled Error
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 57, in <lambda>
-    installSignalHandlers=False))
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1192, in run
-    self.mainLoop()
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1201, in mainLoop
-    self.runUntilCurrent()
---- <exception caught here> ---
-  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 824, in runUntilCurrent
-    call.func(*call.args, **call.kw)
-  File "/root/test/src/test/voltha/volthaTest.py", line 5026, in igmp_flow_check_operating_onu_admin_state
-    assert_equal(self.success, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-exceptions.AssertionError: False != True
-FAIL
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-======================================================================
-FAIL: Test Method:
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "/usr/local/lib/python2.7/dist-packages/nose-1.3.7-py2.7.egg/nose/twistedtools.py", line 165, in wrapper
-    % timeout)
-TimeExpired: timeout expired before end of test (300.000000 s.)
--------------------- >> begin captured stdout << ---------------------
-result.ok, result.status_code are False and 409
-
---------------------- >> end captured stdout << ----------------------
--------------------- >> begin captured logging << --------------------
-scapy.runtime: WARNING: No route found for IPv6 destination :: (no default route?)
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-docker.auth.auth: DEBUG: Trying paths: ['/root/.docker/config.json', '/root/.dockercfg']
-docker.auth.auth: DEBUG: No config file found
-cordTester: INFO: Connecting to controller at 172.17.0.2
-cordTester: INFO: onoscli: Trying to connect to 172.17.0.2
-onoscli: INFO:
-cordTester: INFO: Spawning pexpect for ip 172.17.0.2
-cordTester: INFO: ssh connection asked for password, gave password
-cordTester: INFO: Command 'summary -j' sent to onoscli.
-cordTester: ERROR: onoscli: eof exception found
-cordTester: ERROR: onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-cordTester: INFO: Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-cordTester: INFO: Enabling ponsim olt
-cordTester: INFO: Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-cordTester: INFO: Enabling device 340b50adba61
-cordTester: INFO: Checking operational status for device 340b50adba61
-cordTester: INFO: Installing OLT app
-cordTester: INFO: Adding subscribers through OLT app
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: DEBUG: Adding group 225.0.0.1
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha
-cordTester: INFO: Admin state of uni_port is down
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Admin state of uni_port is up now
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: All subscribers have joined the channel
-cordTester: INFO: Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-cordTester: INFO: Subscriber on port veth4 timed out
-cordTester: INFO: Subscriber on port veth0 timed out
-cordTester: INFO: Joining channel 0 for subscriber port veth4
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: INFO: Joining channel 0 for subscriber port veth0
-cordTester: INFO: This service is failed and other services will not run for this subscriber
-cordTester: INFO: This Subscriber is tested for multiple service eligibility
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 128
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 129
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 130
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 131
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 132
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 133
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 134
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 135
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 136
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 137
-cordTester: ERROR: Error deleting subscriber for device of:0000000000000001 on port 138
-cordTester: INFO: Disabling device 340b50adba61
-cordTester: INFO: Deleting device 340b50adba61
-cordTester: INFO: Uninstalling OLT app
---------------------- >> end captured logging << ---------------------
-
-----------------------------------------------------------------------
-Ran 1 test in 324.349s
-
-FAILED (failures=1)
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic Failure
-Done running tests
-Removing test container cord-tester1
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic b/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic
deleted file mode 100644
index 9b1d7f6..0000000
--- a/src/test/results/voltha/ponsim/test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic
+++ /dev/null
@@ -1,141 +0,0 @@
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$ sudo ./cord-test.py run -m manifest-ponsim.json -t voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic
-# Host [172.17.0.2]:8101 found: line 3 type RSA
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-result.ok, result.status_code are False and 409
-ONOS app cord-config, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app aaa, version 2.0-SNAPSHOT installed
-result.ok, result.status_code are False and 409
-ONOS app igmp, version 2.0-SNAPSHOT installed
-Controller IP [u'172.17.0.2'], Test type voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic
-Installing cord tester ONOS app /home/ubuntu/cord-tester/src/test/apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar
-result.ok, result.status_code are False and 409
-WARNING: The DOCKER_HOST_IP variable is not set. Defaulting to a blank string.
-compose_consul_1 is up-to-date
-compose_fluentd_1 is up-to-date
-compose_zookeeper_1 is up-to-date
-compose_registrator_1 is up-to-date
-compose_kafka_1 is up-to-date
-IP 172.18.0.2 for service consul
-IP 172.18.0.5 for service kafka
-IP 172.18.0.4 for service zookeeper
-IP 172.18.0.6 for service registrator
-IP 172.18.0.3 for service fluentd
-Chameleon voltha sevice is already running. Skipped start
-VOLTHA core is already running. Skipped start
-VOLTHA ofagent is already running. Skipped start
-PONSIM already running. Skipped start
-Radius server running with IP 172.17.0.3
-Running ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic'] tests across 1 containers in parallel
-Modifying scapy tool files before running a test: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic']
-Running tests: ['voltha:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic']
-WARNING: No route found for IPv6 destination :: (no default route?)
-Connecting to controller at 172.17.0.2
-onoscli: Trying to connect to 172.17.0.2
-# Host [172.17.0.2]:8101 found: line 1 type RSA
-Spawning pexpect for ip 172.17.0.2
-ssh connection asked for password, gave password
-Command 'summary -j' sent to onoscli.
-onoscli: eof exception found
-onoscli:     logout
-
-Connection to 172.17.0.2 closed.
-
-Installing the multi table app /root/test/src/test/voltha/../apps/ciena-cordigmp-multitable-3.0-SNAPSHOT.oar for subscriber test
-Test Method: ... Enabling ponsim olt
-Pre-provisioning ponsim_olt with address 172.17.0.1:50060
-Enabling device 6f6b15aa05c6
-Checking operational status for device 6f6b15aa05c6
-Installing OLT app
-Adding subscribers through OLT app
-Adding group 225.0.0.1
-Adding group 225.0.0.1
-All subscribers have joined the channel
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha
-Admin state of uni_port is down
-All subscribers have joined the channel
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Leaving channel 0 for subscriber on port veth4
-Subscriber on port veth0 timed out
-Leaving channel 0 for subscriber on port veth0
-Interface veth4 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth4 checking data traffic receiving from group 225.0.0.1, channel 0
-Admin state of uni_port is up now
-Interface veth0 Join RX stats for subscriber, Avg 0.000 usecs, Std deviation 0.000 usecs, Min 0.000, Max 0.000 for 1 packets
-
-Subscriber on port veth0 checking data traffic receiving from group 225.0.0.1, channel 0
-Subscriber on port veth4 timed out
-Subscriber on port veth4 not received 0 packets
-Joining channel 0 for subscriber port veth4
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Subscriber on port veth0 timed out
-Subscriber on port veth0 not received 0 packets
-Joining channel 0 for subscriber port veth0
-This service is failed and other services will not run for this subscriber
-This Subscriber is tested for multiple service eligibility
-Deleted subscriber for device of:0000000000000001 on port  128
-Deleted subscriber for device of:0000000000000001 on port  129
-Deleted subscriber for device of:0000000000000001 on port  130
-Deleted subscriber for device of:0000000000000001 on port  131
-Deleted subscriber for device of:0000000000000001 on port  132
-Deleted subscriber for device of:0000000000000001 on port  133
-Deleted subscriber for device of:0000000000000001 on port  134
-Deleted subscriber for device of:0000000000000001 on port  135
-Deleted subscriber for device of:0000000000000001 on port  136
-Deleted subscriber for device of:0000000000000001 on port  137
-Deleted subscriber for device of:0000000000000001 on port  138
-Disabling device 6f6b15aa05c6
-Deleting device 6f6b15aa05c6
-Uninstalling OLT app
-Exception in thread Thread-2:
-Traceback (most recent call last):
-  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
-    self.run()
-  File "/usr/lib/python2.7/threading.py", line 763, in run
-    self.__target(*self.__args, **self.__kwargs)
-  File "/root/test/src/test/voltha/volthaTest.py", line 1444, in voltha_subscribers
-    assert_equal(test_status, True)
-  File "/usr/lib/python2.7/unittest/case.py", line 515, in assertEqual
-    assertion_func(first, second, msg=msg)
-  File "/usr/lib/python2.7/unittest/case.py", line 508, in _baseAssertEqual
-    raise self.failureException(msg)
-AssertionError: False != True
-
-Igmp flow check expected to fail, hence ignore the test_status of igmp flow check
-ok
-Installing back the cord igmp app /root/test/src/test/voltha/../apps/ciena-cordigmp-3.0-SNAPSHOT.oar for subscriber test on exit
-
-----------------------------------------------------------------------
-Ran 1 test in 138.053s
-
-OK
-Test volthaTest.py:voltha_exchange.test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic Success
-Done running tests
-ubuntu@cord-rs-s8:~/cord-tester/src/test/setup$
-
diff --git a/src/test/results/vsg/vsg-tests-output.log b/src/test/results/vsg/vsg-tests-output.log
deleted file mode 100644
index c0d780c..0000000
--- a/src/test/results/vsg/vsg-tests-output.log
+++ /dev/null
@@ -1,854 +0,0 @@
-*****************   Test 1   ****************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_health
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-Pinging VSG mysite_exampleservice-2 at IP 172.27.0.3
-VSG mysite_exampleservice-2 at IP 172.27.0.3 is reachable
-Pinging VSG mysite_vsg-1 at IP 172.27.0.2
-VSG mysite_vsg-1 at IP 172.27.0.2 is reachable
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 7.366s
-
-OK
-*****************  Test 2  *********************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_health_check
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Pinging VSG mysite_vsg-1 at IP 172.27.0.2
-VSG mysite_vsg-1 at IP 172.27.0.2 is reachable
-vsg status is True
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 9.226s
-
-OK
-******************  Test 3 *******************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ...
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 19.588s
-
-OK
-******************* Test 4  ********************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_login
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ...
-Checking if VSG at 172.27.0.3 is accessible from compute node functional-chance.cord.lab
-OK
-Checking if VSG at 172.27.0.2 is accessible from compute node functional-chance.cord.lab
-OK
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 9.543s
-
-OK
-******************** Test 5 *******************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_default_route_through_testclient
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... ok
-
-----------------------------------------------------------------------
-Ran 1 test in 9.972s
-
-OK
-********************** Test 6 ***************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_through_testclient
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... ok
-
-----------------------------------------------------------------------
-Ran 1 test in 8.216s
-
-OK
-******************* Test 7 ******************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.3628' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending icmp echo requests to external network 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 7.697s
-
-OK
-********************** Test 8 ************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_to_google
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.3696' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending icmp ping requests to www.google.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 6.041s
-
-OK
-******************** Test 9 *****************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_to_retrieve_content_from_google_to_validate_path_mtu
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.1850' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.131 for vcpe0.222.111
-Initiating get requests to www.google.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 5.820s
-
-OK
-********************  Test 10 **************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_to_retrieve_content_from_rediff_to_validate_path_mtu
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.1917' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.131 for vcpe0.222.111
-Initiating get requests to www.rediff.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 8.367s
-
-OK
-*************************** Test 11 *********************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_to_retrieve_content_from_yahoo_to_validate_path_mtu
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.1984' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.131 for vcpe0.222.111
-Initiating get requests to www.yahoo.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 6.941s
-
-OK
-
-*************** Test 12 ************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_to_retrieve_content_from_facebook_to_validate_path_mtu
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.2051' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.131 for vcpe0.222.111
-Initiating get requests to www.facebook.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 6.309s
-
-OK
-***************** Test 13 ******************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_to_invalid_host
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.3846' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending icmp ping requests to non existent host www.goglee.com
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 17.119s
-
-OK
-**********************  Test 14  ****************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_with_ttl_1
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.3982' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending icmp ping requests to host 8.8.8.8 with ttl 1
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 5.899s
-
-OK
-*******************  Test 15 ********************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_with_wan_interface_toggle_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.4174' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending ICMP pings to host 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 16.994s
-
-OK
-******************** Test 16 ******************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_with_lan_interface_toggle_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.4468' to '/etc/resolv.conf': Device or resource busy
-Got DHCP IP 192.168.0.77 for vcpe0.222.111
-Sending ICMP pings to host 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 25.069s
-
-OK
-****************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_vcpe_interface_and_validate_dhcp_ip_after_interface_toggle
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are False and 409
-Test Method: ... mv: cannot move '/etc/resolv.conf.dhclient-new.1121' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 8.961s
-
-OK
-**************************************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_after_restarting_vcpe_instance
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ...
-mv: cannot move '/etc/resolv.conf.dhclient-new.4728' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 48.540s
-
-OK
-*****************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_for_external_connectivity_with_vcpe_container_paused
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ...
-mv: cannot move '/etc/resolv.conf.dhclient-new.603' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 15.656s
-
-OK
-
-*****************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_with_deny_destination_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are False and 409
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.612' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-In finally block
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.212s
-
-OK
-
-*******************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_with_rule_add_and_delete_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.797' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.405s
-
-OK
-
-*******************************************************
-
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_verifying_reachability_for_non_blocked_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.866' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-route is 204.79.197.203
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.724s
-
-OK
-*******************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_appending_rules_with_deny_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.1479' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-RTNETLINK answers: File exists
-route is 204.79.197.203
-RTNETLINK answers: File exists
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 36.572s
-
-OK
-*******************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_removing_one_rule_denying_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.4131' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-route is 204.79.197.203
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 46.450s
-
-OK
-*********************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_changing_rule_id_deny_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.4224' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.805s
-
-OK
-************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_changing_deny_rule_to_accept_dest_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.4466' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.567s
-
-OK
-*******************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_denying_destination_network
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.4538' to '/etc/resolv.conf': Device or resource busy
-route is 204.79.197.203
-route is 204.79.197.210
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.082s
-
-OK
-*************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_denying_destination_network_subnet_modification
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.4617' to '/etc/resolv.conf': Device or resource busy
-route is 204.79.197.203
-route is 204.79.197.210
-route is 204.79.197.224
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 44.826s
-
-OK
-******************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_with_deny_source_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.4810' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.880s
-
-OK
-********************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_rule_with_add_and_delete_deny_source_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.4873' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.416s
-
-OK
-****************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_rule_with_deny_icmp_protocol_echo_requests_type
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.5392' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.047s
-
-OK
-************************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_rule_with_deny_icmp_protocol_echo_reply_type
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.3886' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.375s
-
-OK
-************************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_changing_deny_rule_to_accept_rule_with_icmp_protocol_echo_requests_type
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.3950' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.278s
-
-OK
-
-**************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_changing_deny_rule_to_accept_rule_with_icmp_protocol_echo_reply_type
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5644' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.751s
-
-OK
-*************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_for_deny_icmp_protocol
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5727' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.042s
-
-OK
-****************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_rule_deny_icmp_protocol_and_destination_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5806' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 47.133s
-
-OK
-**********************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_flushing_all_configured_rules
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.4473' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-RTNETLINK answers: File exists
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 79.714s
-
-OK
-***************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_replacing_deny_rule_to_accept_rule_ipv4_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.7957' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.115s
-
-OK
-*************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_traffic_going_out_of_wan_interface_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.8115' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.771s
-
-OK
-********************************************************
-root@3edfd8d51f8c:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_dnat_change_modifying_destination_ip_address
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.13566' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 25.330s
-
-OK
-********************************************************
-root@3edfd8d51f8c:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_dnat_modifying_destination_ip_and_delete
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.13737' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.179s
-
-OK
-***************************************************
-root@3edfd8d51f8c:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_dnat_modifying_destination_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.13812' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.543s
-
-OK
-*************************************************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_dns_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.707' to '/etc/resolv.conf': Device or resource busy
-RTNETLINK answers: File exists
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 13.345s
-
-OK
-******************************************************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_dns_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.1972' to '/etc/resolv.conf': Device or resource busy
-ping: unknown host google-public-dns-a.google.com
-mv: cannot move '/etc/resolv.conf.dhclient-new.2035' to '/etc/resolv.conf': Device or resource busy
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 33.525s
-
-OK
-*******************************************************************
-root@8376ade8b506:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_flushing_all_configured_rules
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.2252' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 71.063s
-
-OK
-************************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_ipv4_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.4648' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.181s
-
-OK
-*******************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_replacing_deny_rule_to_accept_rule_ipv4_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.4819' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 22.381s
-
-OK
-************************************************************
-root@bbec06b191d6:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_traffic_coming_on_lan_interface_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.8036' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 24.240s
-
-OK
-**********************************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_traffic_going_out_of_wan_interface_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.5050' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.629s
-
-OK
-******************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_traffic_from_lan_to_wan_in_vcpe
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5114' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.977s
-
-OK
-*********************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_dns_traffic
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-RTNETLINK answers: File exists
-mv: cannot move '/etc/resolv.conf.dhclient-new.5240' to '/etc/resolv.conf': Device or resource busy
-ping: unknown host google-public-dns-a.google.com
-mv: cannot move '/etc/resolv.conf.dhclient-new.5298' to '/etc/resolv.conf': Device or resource busy
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 31.677s
-
-OK
-*****************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_firewall_deny_all_ipv4_traffic_vcpe_container_restart
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5360' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 57.841s
-
-OK
-*****************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_nat_dnat_modifying_destination_ip
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5426' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.689s
-
-OK
-******************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_nat_dnat_modifying_destination_ip_and_delete
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5489' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 21.492s
-
-OK
-*******************************************************
-root@dc7cc3594ebf:~/test/src/test# nosetests -v -s vsg/vsgTest.py:vsg_exchange.test_vsg_nat_dnat_change_modifying_destination_ip_address
-Unable to connect to test host prod.cord.lab
-result.ok, result.status_code are True and 200
-Test Method: ... /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:334: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  SNIMissingWarning
-/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:132: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this. For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-  InsecurePlatformWarning
-mv: cannot move '/etc/resolv.conf.dhclient-new.5553' to '/etc/resolv.conf': Device or resource busy
-route is 8.8.8.8
-ok
-
-----------------------------------------------------------------------
-Ran 1 test in 23.831s
-
-OK
-
diff --git a/src/test/scale/__init__.py b/src/test/scale/__init__.py
deleted file mode 100644
index 2d1b1a2..0000000
--- a/src/test/scale/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-tls_dir = os.path.join(working_dir, '../tls')
-vrouter_dir = os.path.join(working_dir, '../vrouter')
-vsg_dir = os.path.join(working_dir, '../vsg')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
-__path__.append(tls_dir)
-__path__.append(vrouter_dir)
-__path__.append(vsg_dir)
diff --git a/src/test/scale/scaleTest.json b/src/test/scale/scaleTest.json
deleted file mode 100644
index afb522a..0000000
--- a/src/test/scale/scaleTest.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "NUM_SUBSCRIBERS" : 100,
-    "SUBSCRIBER_ACCOUNT_NUM" : 100,
-    "SUBSCRIBER_S_TAG" : 500,
-    "SUBSCRIBER_C_TAG" : 500,
-    "SUBSCRIBERS_PER_S_TAG" : 8
-}
diff --git a/src/test/scale/scaleTest.py b/src/test/scale/scaleTest.py
deleted file mode 100644
index b290016..0000000
--- a/src/test/scale/scaleTest.py
+++ /dev/null
@@ -1,1406 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import time
-import os
-import sys
-import json
-import requests
-import random
-from nose.tools import *
-from scapy.all import *
-from twisted.internet import defer
-from nose.twistedtools import reactor, deferred
-from CordTestUtils import *
-from OltConfig import OltConfig
-from onosclidriver import OnosCliDriver
-from SSHTestAgent import SSHTestAgent
-from Channels import Channels, IgmpChannel
-from IGMP import *
-from CordLogger import CordLogger
-from VSGAccess import VSGAccess
-from OnosFlowCtrl import OnosFlowCtrl
-#imports for cord-subscriber module
-from subscriberDb import SubscriberDB
-from Stats import Stats
-from threadPool import ThreadPool
-import threading
-from EapTLS import TLSAuthTest
-from CordTestUtils import log_test as log
-from CordTestConfig import setup_module, running_on_ciab
-from OnosCtrl import OnosCtrl
-from CordContainer import Onos
-from CordSubscriberUtils import CordSubscriberUtils, XosUtils
-from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_shell, cord_test_radius_restart
-from Scale import scale
-log.setLevel('INFO')
-
-
-class scale_exchange(CordLogger):
-    HOST = "10.1.0.1"
-    USER = "vagrant"
-    PASS = "vagrant"
-    head_node = os.getenv('HEAD_NODE', 'prod')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    restApiXos =  None
-    cord_subscriber = None
-    SUBSCRIBER_ACCOUNT_NUM = 100
-    SUBSCRIBER_S_TAG = 500
-    SUBSCRIBER_C_TAG = 500
-    SUBSCRIBERS_PER_S_TAG = 8
-    subscriber_info = []
-    volt_subscriber_info = []
-    restore_methods = []
-    TIMEOUT=120
-    NUM_SUBSCRIBERS = 16
-    wan_intf_ip = '10.6.1.129'
-    V_INF1 = 'veth0'
-    V_INF2 = 'veth1'
-    MGROUP1 = '239.1.2.3'
-    MGROUP2 = '239.2.2.3'
-    MINVALIDGROUP1 = '255.255.255.255'
-    MINVALIDGROUP2 = '239.255.255.255'
-    MMACGROUP1 = "01:00:5e:01:02:03"
-    MMACGROUP2 = "01:00:5e:02:02:03"
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.0.22'
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    igmp_app = 'org.opencord.igmp'
-    acl_app = 'org.onosproject.acl'
-    aaa_app = 'org.opencord.aaa'
-    app = 'org.onosproject.cli'
-    APP_NAME = 'org.ciena.xconnect'
-    INTF_TX_DEFAULT = 'veth2'
-    INTF_RX_DEFAULT = 'veth0'
-    default_port_map = {
-        PORT_TX_DEFAULT : INTF_TX_DEFAULT,
-        PORT_RX_DEFAULT : INTF_RX_DEFAULT,
-        INTF_TX_DEFAULT : PORT_TX_DEFAULT,
-        INTF_RX_DEFAULT : PORT_RX_DEFAULT
-        }
-    vrouter_apps = ('org.onosproject.proxyarp', 'org.onosproject.hostprovider', 'org.onosproject.vrouter', 'org.onosproject.fwd')
-    MAX_PORTS = 100
-    subscriber_apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
-    olt_apps = () #'org.opencord.cordmcast')
-    vtn_app = 'org.opencord.vtn'
-    table_app = 'org.ciena.cordigmp'
-    aaa_loaded = False
-    table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
-    app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
-    olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-1.2-SNAPSHOT.oar')
-    olt_app_name = 'org.onosproject.olt'
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    cpqd_path = os.path.join(test_path, '..', 'setup')
-    ovs_path = cpqd_path
-    test_services = ('IGMP', 'TRAFFIC')
-    num_joins = 0
-    num_subscribers = 0
-    leave_flag = True
-    num_channels = 0
-    recv_timeout = False
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-    SUBSCRIBER_TIMEOUT = 300
-    device_id = 'of:' + get_mac()
-
-    CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-
-    @classmethod
-    def setUpCordApi(cls):
-        num_subscribers = max(cls.NUM_SUBSCRIBERS, 10)
-        cls.cord_subscriber = CordSubscriberUtils(num_subscribers,
-                                                  account_num = cls.SUBSCRIBER_ACCOUNT_NUM,
-                                                  s_tag = cls.SUBSCRIBER_S_TAG,
-                                                  c_tag = cls.SUBSCRIBER_C_TAG,
-                                                  subscribers_per_s_tag = cls.SUBSCRIBERS_PER_S_TAG)
-        cls.restApiXos = XosUtils.getRestApi()
-
-    @classmethod
-    def setUpClass(cls):
-	log.info('in setUp class 00000000000000')
-        cls.controllers = get_controllers()
-        cls.controller = cls.controllers[0]
-        cls.cli = None
-        cls.on_pod = running_on_pod()
-        cls.on_ciab = running_on_ciab()
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.vcpes = cls.olt.get_vcpes()
-        cls.vcpes_dhcp = cls.olt.get_vcpes_by_type('dhcp')
-        cls.vcpes_reserved = cls.olt.get_vcpes_by_type('reserved')
-        cls.dhcp_vcpes_reserved = [ 'vcpe{}.{}.{}'.format(i, cls.vcpes_reserved[i]['s_tag'], cls.vcpes_reserved[i]['c_tag'])
-                                    for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.untagged_dhcp_vcpes_reserved = [ 'vcpe{}'.format(i) for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.container_vcpes_reserved = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_reserved ]
-        vcpe_dhcp_reserved = None
-        vcpe_container_reserved = None
-        if cls.vcpes_reserved:
-            vcpe_dhcp_reserved = cls.dhcp_vcpes_reserved[0]
-            if cls.on_pod is False:
-                vcpe_dhcp_reserved = cls.untagged_dhcp_vcpes_reserved[0]
-            vcpe_container_reserved = cls.container_vcpes_reserved[0]
-
-        cls.vcpe_dhcp_reserved = vcpe_dhcp_reserved
-        cls.vcpe_container_reserved = vcpe_container_reserved
-        dhcp_vcpe_offset = len(cls.vcpes_reserved)
-        cls.dhcp_vcpes = [ 'vcpe{}.{}.{}'.format(i+dhcp_vcpe_offset, cls.vcpes_dhcp[i]['s_tag'], cls.vcpes_dhcp[i]['c_tag'])
-                           for i in xrange(len(cls.vcpes_dhcp))  ]
-        cls.untagged_dhcp_vcpes = [ 'vcpe{}'.format(i+dhcp_vcpe_offset) for i in xrange(len(cls.vcpes_dhcp)) ]
-        cls.container_vcpes = [ 'vcpe-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_dhcp ]
-        vcpe_dhcp = None
-        vcpe_container = None
-        #cache the first dhcp vcpe in the class for quick testing
-        if cls.vcpes_dhcp:
-            vcpe_container = cls.container_vcpes[0]
-            vcpe_dhcp = cls.dhcp_vcpes[0]
-            if cls.on_pod is False:
-                vcpe_dhcp = cls.untagged_dhcp_vcpes[0]
-        cls.vcpe_container = vcpe_container_reserved or vcpe_container
-        cls.vcpe_dhcp = vcpe_dhcp_reserved or vcpe_dhcp
-        VSGAccess.setUp()
-        cls.setUpCordApi()
-        if cls.on_pod is True:
-            cls.openVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
-
-    @classmethod
-    def tearDownClass(cls):
-        VSGAccess.tearDown()
-        if cls.on_pod is True:
-            cls.closeVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
-
-    def log_set(self, level = None, app = 'org.onosproject'):
-        CordLogger.logSet(level = level, app = app, controllers = self.controllers, forced = True)
-######################## vsg - vcpe utility functions #########################
-    @classmethod
-    def closeVCPEAccess(cls, volt_subscriber_info):
-        OnosCtrl.uninstall_app(cls.APP_NAME, onos_ip = cls.HEAD_NODE)
-
-    @classmethod
-    def openVCPEAccess(cls, volt_subscriber_info):
-        """
-        This code is used to configure leaf switch for head node access to compute node over fabric.
-        Care is to be taken to avoid overwriting existing/default vcpe flows.
-        The access is opened for generated subscriber info which should not overlap.
-        We target the fabric onos instance on head node.
-        """
-        version = Onos.getVersion(onos_ip = cls.HEAD_NODE)
-        app_version = '1.0-SNAPSHOT'
-        major = int(version.split('.')[0])
-        minor = int(version.split('.')[1])
-        if major > 1:
-            app_version = '2.0-SNAPSHOT'
-        elif major == 1 and minor > 10:
-            app_version = '2.0-SNAPSHOT'
-        cls.APP_FILE = os.path.join(cls.test_path, '..', 'apps/xconnect-{}.oar'.format(app_version))
-        OnosCtrl.install_app(cls.APP_FILE, onos_ip = cls.HEAD_NODE)
-        time.sleep(2)
-        s_tags = map(lambda tenant: int(tenant['voltTenant']['s_tag']), volt_subscriber_info)
-        #only get unique vlan tags
-        s_tags = list(set(s_tags))
-        devices = OnosCtrl.get_device_ids(controller = cls.HEAD_NODE)
-        if devices:
-            device_config = {}
-            for device in devices:
-                device_config[device] = []
-                for s_tag in s_tags:
-                    xconnect_config = {'vlan': s_tag, 'ports' : [ cls.FABRIC_PORT_HEAD_NODE, cls.FABRIC_PORT_COMPUTE_NODE ] }
-                    device_config[device].append(xconnect_config)
-
-            cfg = { 'apps' : { 'org.ciena.xconnect' : { 'xconnectTestConfig' : device_config } } }
-            OnosCtrl.config(cfg, controller = cls.HEAD_NODE)
-
-    def get_system_cpu_usage(self):
-        """ Getting compute node CPU usage """
-        ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-        cmd = "top -b -n1 | grep 'Cpu(s)' | awk '{print $2 + $4}'"
-        status, output = ssh_agent.run_cmd(cmd)
-        assert_equal(status, True)
-        return float(output)
-
-    def vsg_for_external_connectivity(self, subscriber_index, reserved = False):
-        if reserved is True:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes_reserved[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes_reserved[subscriber_index]
-        else:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes[subscriber_index]
-        mgmt = 'eth0'
-        host = '8.8.8.8'
-        self.success = False
-        assert_not_equal(vcpe, None)
-        vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        assert_not_equal(vcpe_ip, None)
-        log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        log.info('Sending icmp echo requests to external network 8.8.8.8')
-        st, _ = getstatusoutput('ping -c 3 8.8.8.8')
-        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        assert_equal(st, 0)
-
-    def vsg_xos_subscriber_create(self, index, subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return ''
-        if subscriber_info is None:
-            subscriber_info = self.cord_subscriber.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.cord_subscriber.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        vcpe = 'vcpe-{}-{}'.format(s_tag, c_tag)
-        subId = self.cord_subscriber.subscriberCreate(index, subscriber_info, volt_subscriber_info)
-        if subId:
-            #if the vsg instance was already instantiated, then reduce delay
-            if c_tag % self.SUBSCRIBERS_PER_S_TAG == 0:
-                delay = 350
-            else:
-                delay = 90
-            log.info('Delaying %d seconds for the VCPE to be provisioned' %(delay))
-            time.sleep(delay)
-            #log.info('Testing for external connectivity to VCPE %s' %(vcpe))
-            #self.vsg_for_external_connectivity(index)
-
-        return subId
-
-    def vsg_delete(self, num_subscribers):
-        if self.on_pod is False:
-            return
-        num_subscribers = min(num_subscribers, len(self.cord_subscriber.subscriber_info))
-        for index in xrange(num_subscribers):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId and subId != '0':
-                self.vsg_xos_subscriber_delete(index, subId = subId)
-
-    def vsg_xos_subscriber_delete(self, index, subId = '', voltId = '', subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return
-        self.cord_subscriber.subscriberDelete(index, subId = subId, voltId = voltId,
-                                              subscriber_info = subscriber_info,
-                                              volt_subscriber_info = volt_subscriber_info)
-
-    def vsg_xos_subscriber_id(self, index):
-        if self.on_pod is False:
-            return ''
-        return self.cord_subscriber.subscriberId(index)
-
-    def vsg_xos_subscriber_create_reserved(self):
-        if self.on_pod is False:
-            return
-        tags_reserved = [ (int(vcpe['s_tag']), int(vcpe['c_tag'])) for vcpe in self.vcpes_reserved ]
-        volt_tenants = self.restApiXos.ApiGet('TENANT_VOLT')
-        subscribers = self.restApiXos.ApiGet('TENANT_SUBSCRIBER')
-        reserved_tenants = filter(lambda tenant: (int(tenant['s_tag']), int(tenant['c_tag'])) in tags_reserved, volt_tenants)
-        reserved_config = []
-        for tenant in reserved_tenants:
-            for subscriber in subscribers:
-                if int(subscriber['id']) == int(tenant['subscriber']):
-                    volt_subscriber_info = {}
-                    volt_subscriber_info['voltTenant'] = dict(s_tag = tenant['s_tag'],
-                                                              c_tag = tenant['c_tag'],
-                                                              subscriber = tenant['subscriber'])
-                    volt_subscriber_info['volt_id'] = tenant['id']
-                    volt_subscriber_info['account_num'] = subscriber['identity']['account_num']
-                    reserved_config.append( (subscriber, volt_subscriber_info) )
-                    break
-            else:
-                log.info('Subscriber not found for tenant %s, s_tag: %s, c_tag: %s' %(str(tenant['subscriber']),
-                                                                                      str(tenant['s_tag']),
-                                                                                      str(tenant['c_tag'])))
-
-        for subscriber_info, volt_subscriber_info in reserved_config:
-            self.vsg_xos_subscriber_delete(0,
-                                           subId = str(subscriber_info['id']),
-                                           voltId = str(volt_subscriber_info['volt_id']),
-                                           subscriber_info = subscriber_info,
-                                           volt_subscriber_info = volt_subscriber_info)
-            subId = self.vsg_xos_subscriber_create(0,
-                                                   subscriber_info = subscriber_info,
-                                                   volt_subscriber_info = volt_subscriber_info)
-            log.info('Created reserved subscriber %s' %(subId))
-
-    @deferred(1800)
-    def test_scale_for_vsg_vm_creations(self):
-	try:
-	    df = defer.Deferred()
-	    def scale_vsg_vms(df):
-        	for index in xrange(len(self.cord_subscriber.subscriber_info)):
-                    #check if the index exists
-                    subId = self.vsg_xos_subscriber_id(index)
-                    log.info('test_vsg_xos_subscriber_creation - subId is %s'%subId)
-                    if subId and subId != '0':
-                        self.vsg_xos_subscriber_delete(index, subId = subId)
-                    subId = self.vsg_xos_subscriber_create(index)
-                    log.info('Created Subscriber %s' %(subId))
-                df.callback(0)
-            reactor.callLater(0, scale_vsg_vms, df)
-            return df
-	finally:
-	    pass
-	    #self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-	    self.vsg_xos_subscriber_create_reserved
-
-    @deferred(1800)
-    def test_scale_for_vcpe_creations(self):
-        try:
-            df = defer.Deferred()
-            def scale_vcpe_instances(df):
-                for index in xrange(len(self.cord_subscriber.subscriber_info)):
-                    #check if the index exists
-                    subId = self.vsg_xos_subscriber_id(index)
-                    log.info('test_vsg_xos_subscriber_creation')
-                    if subId and subId != '0':
-                        self.vsg_xos_subscriber_delete(index, subId = subId)
-                    subId = self.vsg_xos_subscriber_create(index)
-                    log.info('Created Subscriber %s' %(subId))
-                    df.callback(0)
-            reactor.callLater(0, scale_vcpe_instances, df)
-            return df
-        except:
-            self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-            self.vsg_xos_subscriber_create_reserved
-
-    @deferred(1800)
-    def test_scale_of_subcriber_vcpe_creations_in_single_vsg_vm(self):
-	try:
-            df = defer.Deferred()
-            def scale_vcpe_instances(df):
-                subId = self.vsg_xos_subscriber_create(100)
-                if subId and subId != '0':
-                   self.vsg_xos_subscriber_delete(100, subId)
-                df.callback(0)
-            reactor.callLater(0, scale_vsg_vms, df)
-            return df
-        except:
-            self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-            self.vsg_xos_subscriber_create_reserved
-
-    @deferred(1800)
-    def test_scale_of_subcriber_vcpe_creations_in_multiple_vsg_vm(self):
-        try:
-            df = defer.Deferred()
-            def scale_vcpe_instances(df):
-                subId = self.vsg_xos_subscriber_create(100)
-                if subId and subId != '0':
-                    self.vsg_xos_subscriber_delete(100, subId)
-                df.callback(0)
-            reactor.callLater(0, scale_vsg_vms, df)
-            return df
-        except:
-            self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-            self.vsg_xos_subscriber_create_reserved
-
-    @deferred(1800)
-    def test_scale_of_subcriber_vcpe_creations_with_one_vcpe_in_one_vsg_vm(self):
-        try:
-            df = defer.Deferred()
-            def scale_vcpe_instances(df):
-                subId = self.vsg_xos_subscriber_create(100)
-                if subId and subId != '0':
-                    self.vsg_xos_subscriber_delete(100, subId)
-                df.callback(0)
-            reactor.callLater(0, scale_vsg_vms, df)
-            return df
-        except:
-            self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-            self.vsg_xos_subscriber_create_reserved
-
-    @deferred(1800)
-    def test_scale_for_cord_subscriber_creation_and_deletion(self):
-        try:
-            df = defer.Deferred()
-            def scale_vcpe_instances(df):
-                subId = self.vsg_xos_subscriber_create(100)
-                if subId and subId != '0':
-                    self.vsg_xos_subscriber_delete(100, subId)
-                df.callback(0)
-            reactor.callLater(0, scale_vsg_vms, df)
-            return df
-        except:
-            self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-            self.vsg_xos_subscriber_create_reserved
-
-    def test_cord_for_scale_of_subscriber_containers_per_compute_node(self):
-        pass
-
-    @deferred(10)
-    def test_latency_of_cord_for_control_packets_using_icmp_packet(self):
-        """
-	Test-Method:
-	1. Ping from cord-tester to wan interface IP of CiaB setup
-	2. Grep latency of ping packets
-	"""
-        df = defer.Deferred()
-        def scale_vcpe_instances(df):
-            cmd = "ping -c 4 {0} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
-            st, out = getstatusoutput(cmd)
-            if out != '':
-                out = out.split('/')
-                avg_rtt = out[1]
-                latency = float(avg_rtt)/float(2)
-            else:
-                latency = None
-            log.info('CORD setup latency calculated from icmp packet is = %s ms'%latency)
-            assert_not_equal(latency,None)
-            df.callback(0)
-        reactor.callLater(0, scale_vsg_vms, df)
-        return df
-
-    @deferred(20)
-    def test_latency_of_cord_for_control_packets_using_increasing_sizes_of_icmp_packet(self):
-        """
-	Test-Method:
-	1. Ping from cord-tester to wan interface IP of CiaB setup
-	2. Grep the latency of ping packet
-	3. Repeat the process for varying sizes of ping packets
-	"""
-        df = defer.Deferred()
-        def scale_vcpe_instances(df):
-            pckt_sizes = [100,500,1000,1500]
-            for size in pckt_sizes:
-                cmd = "ping -c 4 -s {} {} | tail -1| awk '{{print $4}}'".format(size,self.wan_intf_ip)
-                st, out = getstatusoutput(cmd)
-                if out != '':
-                    out = out.split('/')
-                    avg_rtt = out[1]
-                    latency = float(avg_rtt)/float(2)
-                else:
-                    latency = None
-            log.info('CORD setup latency calculated from icmp packet with size %s bytes is = %s ms'%(size,latency))
-            assert_not_equal(latency,None)
-            df.callback(0)
-        reactor.callLater(0, scale_vsg_vms, df)
-        return df
-
-    @deferred(10)
-    def test_latency_of_cord_with_traceroute(self):
-        """
-	Test-Method:
-	1. Traceroute from cord-tester to wan interface IP of CiaB setup
-	2. Grep the latency of ping packet
-	3. Repeat the process for varying sizes of ping packets
-	"""
-        df = defer.Deferred()
-        def scale_vcpe_instances(df):
-            cmd = "traceroute -q1 {} | tail -1| awk '{{print $4}}'".format(self.wan_intf_ip)
-            avg_rtt = float(0)
-            latency = None
-            for index in [1,2,3]:
-                st, out = getstatusoutput(cmd)
-                if out != '':
-                    avg_rtt += float(out)
-            latency = float(avg_rtt)/float(6)
-            log.info('CORD setup latency calculated from  traceroute is = %s ms'%latency)
-            assert_not_equal(latency,0.0)
-            assert_not_equal(latency,None)
-            df.callback(0)
-        reactor.callLater(0, scale_vsg_vms, df)
-        return df
-
-    #tested with 50 igmp joins on CiaB setup
-    @deferred(1000)
-    def test_scale_with_igmp_joins_for_multicast_groups_validating_cpu_usage(self, group_count=500):
-        """
-	Test-Method:
-	1. Register 500 (group count is number to test) igmp groups in onos
-	2. Send  igmp joins for registered groups
-	3. Send multicast traffic to all registered groups
-	4. Verify traffic forwards properly
-	"""
-        df = defer.Deferred()
-        def scale_igmp_joins(df):
-            OnosCtrl(self.igmp_app).activate()
-            groups = scale().generate_random_multicast_ip_addresses(count = group_count)
-            sources = scale().generate_random_unicast_ip_addresses(count = group_count)
-            scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
-	    try:
-                for index in range(group_count):
-                    scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-                    status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, True)
-                    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-	    except Exception as error:
-		log.info('Got unexpected error %s'%error)
-		raise
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_joins, df)
-        return df
-
-    #tested with 50 igmp joins on CiaB setup
-    @deferred(1000)
-    def test_scale_with_igmp_joins_for_multicast_groups_toggling_igmp_app(self, group_count=1000):
-	"""
-	Test-Method:
-	1. Register 1000 (group_count is a number to test, can increase the number)igmp groups in onos
-	2. Send  igmp joins for registered groups
-	3. Send multicast traffic to all registered groups
-	4. Verify traffic forwards properly
-	5. deactivate and activate igmp app in onos
-	6. Verify multicast traffic do not forward after igmp app deactivated
-	"""
-        df = defer.Deferred()
-        def scale_igmp_joins(df):
-            OnosCtrl(self.igmp_app).activate()
-            groups = scale().generate_random_multicast_ip_addresses(count = group_count)
-            sources = scale().generate_random_unicast_ip_addresses(count = group_count)
-            scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
-	    try:
-                for index in range(group_count):
-                    scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-                    status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, True)
-                    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-		log_test.info('Deactivating igmp app in onos')
-		OnosCtrl(self.igmp_app).deactivate()
-		time.sleep(2)
-		for index in range(group_count):
-                    status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, False)
-		    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-		OnosCtrl(self.igmp_app).activate()
-	    except Exception as error:
-		log.info('Got unexpected error %s'%error)
-		OnosCtrl(self.igmp_app).activate()
-		raise
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_joins, df)
-        return df
-
-    #tested with 50 igmp joins on CiaB setup
-    @deferred(1800)
-    def test_scale_with_igmp_joins_for_multicast_groups_validating_cpu_usage(self, group_count=2000):
-	"""
-	Test-Method:
-	1. Register (group count value to test) igmp groups in onos
-	2. Send  igmp joins for registered groups
-	3. Send multicast traffic to all registered groups
-	4. Verify traffic forwards properly
-	"""
-        df = defer.Deferred()
-        def scale_igmp_joins(df):
-            OnosCtrl(self.igmp_app).activate()
-            groups = scale().generate_random_multicast_ip_addresses(count = group_count)
-            sources = scale().generate_random_unicast_ip_addresses(count = group_count)
-            scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
-	    try:
-                for index in range(group_count):
-                    scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-                    status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, True)
-                    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-                    if index % 50 == 0:
-                        cpu_usage = scale().get_system_cpu_usage()
-                        log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_joins, df)
-        return df
-
-    #tested with 50 igmp joins on CiaB setup
-    @deferred(1000)
-    def test_scale_of_igmp_joins_for_multicast_groups_validating_cpu_usage_after_app_deactivation_and_activation(self,group_count=2000):
-	"""
-	Test-Method:
-	1. Register 2000 (Number to test) igmp groups in onos
-	2. Send  igmp joins for registered groups
-	3. Send multicast traffic to all registered groups
-	4. Verify traffic forwards properly
-	"""
-        df = defer.Deferred()
-        def scale_igmp_joins(df):
-	    cpu_usage1 = scale().get_system_cpu_usage()
-            OnosCtrl(self.igmp_app).activate()
-            groups = scale().generate_random_multicast_ip_addresses(count = group_count)
-            sources = scale().generate_random_unicast_ip_addresses(count = group_count)
-            scale().onos_ssm_table_load(groups,src_list=sources,flag=True)
-	    try:
-                for index in range(group_count):
-                    scale().send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-                    status = scale().verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, True)
-                    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-                    if index % 50 == 0:
-                        cpu_usage = self.get_system_cpu_usage()
-                        log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
-		cpu_usage2 = scale().get_system_cpu_usage()
-                OnosCtrl(self.igmp_app).deactivate()
-                time.sleep(2)
-                cpu_usage3 = scale().get_system_cpu_usage()
-                log.info('CPU usage before test start = %f after %d igmp entries registered in onos = %f and after the app deactivated = %f are'%(cpu_usage1,cpu_usage2,cpu_usage3))
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_joins, df)
-        return df
-
-    #tested with 100 flow entries on CiaB setup
-    @deferred(1000)
-    def test_scale_adding_large_number_of_flow_entries_for_tcp_ports(self,count=1000):
-	"""
-	Test-Method:
-	1. Add 1000 (Large number to test) flow entries with varying tcp port number in onos
-	2. Send data traffic for added tcp port numbers
-	3. Verify onos forwards data traffic properly
-	"""
-	scale().flows_setup()
-        df = defer.Deferred()
-        def scale_flow_entries(df):
-            egress = 1
-            ingress = 2
-            egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'tcp_port': random.randint(1024,65535) }
-            ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'tcp_port': random.randint(1024,65535) }
-	    try:
-                for index in range(0,count):
-                    ingress_map['tcp_port'] = random.randint(1024,65535)
-                    egress_map['tcp_port'] = random.randint(1024,65535)
-		    src_port = ingress_map['tcp_port']
-		    egr_port = egress_map['tcp_port']
-		    #log.info('ingress port is %d and egress port is %d'%(src_port,egr_port))
-                    flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress + scale().port_offset,
-                                ingressPort = ingress + scale().port_offset,
-                                tcpSrc = ingress_map['tcp_port'],
-                                tcpDst = egress_map['tcp_port']
-                                )
-                    result = flow.addFlow()
-                    assert_equal(result, True)
-                    log_test.info("flow number = %d is added",index+1)
-                    def mac_recv_task():
-                        def recv_cb(pkt):
-                            log_test.info('Pkt seen with ingress TCP port %s, egress TCP port %s' %(pkt[TCP].sport, pkt[TCP].dport))
-                            result = True
-                        sniff(count=2, timeout=5,
-                                      lfilter = lambda p: TCP in p and p[TCP].dport == egr_port and p[TCP].sport == src_port                                                         ,prn = recv_cb, iface = scale().port_map[egress])
-                    t = threading.Thread(target = mac_recv_task)
-                    t.start()
-                    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-                    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-                    L4 = TCP(sport = src_port, dport = egr_port)
-                    pkt = L2/L3/L4
-                    log_test.info('Sending packets to verify if flows are correct')
-                    sendp(pkt, count=50, iface = scale().port_map[ingress])
-                    t.join()
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0,scale_flow_entries, df)
-        return df
-
-    #tested with 100 flow entries on CiaB setup
-    @deferred(1000)
-    def test_scale_adding_ip_flow_entries_validating_cpu_usage(self,count=5000):
-	"""
-	Test-Method:
-	1. Add 5000(Edit count as per test requirement) flow entries with varying source and destination IP
-	2. Send data traffic matching flow entries
-	3. Verify onos forwards data traffic properly
-	"""
-	scale().flows_setup()
-        df = defer.Deferred()
-        def scale_flow_entries(df):
-            cpu_usage1 = scale().get_system_cpu_usage()
-            egress = 1
-            ingress = 2
-            egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '182.0.0.0' }
-            ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.0.0.0' }
-	    try:
-                for index in range(0,count):
-                    ingress_map['ip'] =  scale().generate_random_unicast_ip_addresses()[0] #next_ip(ingress_map['ip'])
-                    assert_not_equal(ingress_map['ip'], None)
-                    egress_map['ip'] =  scale().generate_random_unicast_ip_addresses()[0] #to_egress_ip(ingress_map['ip'])
-                    flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress + scale().port_offset,
-                                ingressPort = ingress + scale().port_offset,
-                                ethType = '0x0800',
-                                ipSrc = ('IPV4_SRC', ingress_map['ip']+'/8'),
-                                ipDst = ('IPV4_DST', egress_map['ip']+'/8')
-                                )
-                    if index % 50 == 0:
-                        cpu_usage = scale().get_system_cpu_usage()
-                        log.info('CPU usage is %s for flow number %d added'%(cpu_usage,index+1))
-                        time.sleep(1)
-                    def mac_recv_task():
-                        def recv_cb(pkt):
-                            log_test.info('Pkt seen with ingress source IP %s, destination IP %s' %(pkt[IP].src, pkt[IP].dst))
-                            result = True
-                        sniff(count=2, timeout=5,
-                                      lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']                                                         ,prn = recv_cb, iface = scale().port_map[egress])
-                    t = threading.Thread(target = mac_recv_task)
-                    t.start()
-                    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-                    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-                    pkt = L2/L3
-                    log_test.info('Sending packets to verify if flows are correct')
-                    sendp(pkt, count=50, iface = scale().port_map[ingress])
-                    t.join()
-                cpu_usage2 = scale().get_system_cpu_usage()
-                log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_flow_entries, df)
-        return df
-
-    #tested with 100 flow entries on CiaB setup
-    @deferred(1000)
-    def test_scale_adding_flow_entries_with_udp_ports(self,count=10000):
-	"""
-	Test-Method:
-	1. Add 10000 (Number as per test requirement)flow entries with varying udp port number in onos
-	2. Send data traffic matching flow entries
-	3. Verify onos forwards data traffic properly
-	"""
-        scale().flows_setup()
-        df = defer.Deferred()
-        def scale_flow_entries(df):
-            egress = 1
-            ingress = 2
-            egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1', 'udp_port': random.randint(1024,65535) }
-            ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1', 'udp_port': random.randint(1024,65535) }
-            try:
-                for index in range(0,count):
-                    ingress_map['udp_port'] = random.randint(1024,65535)
-                    egress_map['udp_port'] = random.randint(1024,65535)
-                    src_port = ingress_map['udp_port']
-                    egr_port = egress_map['udp_port']
-                    #log.info('ingress port is %d and egress port is %d'%(src_port,egr_port))
-                    flow = OnosFlowCtrl(deviceId = self.device_id,
-                                egressPort = egress + scale().port_offset,
-                                ingressPort = ingress + scale().port_offset,
-                                udpSrc = ingress_map['udp_port'],
-                                udpDst = egress_map['udp_port']
-                                )
-                    result = flow.addFlow()
-                    assert_equal(result, True)
-                    log_test.info("flow number = %d is added",index+1)
-                    def mac_recv_task():
-                        def recv_cb(pkt):
-                            log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
-                            result = True
-                        sniff(count=2, timeout=5,
-                                      lfilter = lambda p: UDP in p and p[UDP].dport == egr_port and p[UDP].sport == src_port                                                         ,prn = recv_cb, iface = scale().port_map[egress])
-                    t = threading.Thread(target = mac_recv_task)
-                    t.start()
-                    L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
-                    L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
-                    L4 = UDP(sport = src_port, dport = egr_port)
-                    pkt = L2/L3/L4
-                    log_test.info('Sending packets to verify if flows are correct')
-                    sendp(pkt, count=50, iface = scale().port_map[ingress])
-                    t.join()
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0,scale_flow_entries, df)
-	return df
-
-    #tested with 100 flow entries on CiaB setup
-    @deferred(1000)
-    def test_scale_adding_constant_destination_mac_flow_entries_validating_cpu_usage(self,count=100):
-	"""
-	Test-Method:
-	1. Add 100(Change number as per requirement) flow entries with varying source mac
-	2. Send data traffic matching flow entries
-	3. Verify onos forwards data traffic properly
-	"""
-	scale().flows_setup()
-        df = defer.Deferred()
-        def scale_flow_entries(df):
-            cpu_usage1 = self.get_system_cpu_usage()
-            egress = 1
-            ingress = 2
-            egress_mac = '02:00:00:00:0:0'
-            ingress_mac = '03:00:00:00:00:00'
-	    try:
-                for index in range(0,count):
-		    result = False
-                    ingress_mac = scale().next_mac(ingress_mac)
-                    flow = OnosFlowCtrl(deviceId = self.device_id,
-                        egressPort = egress + scale().port_offset,
-                        ingressPort = ingress + scale().port_offset,
-                        ethSrc = ingress_mac,
-                        ethDst = egress_mac)
-                    result = flow.addFlow()
-                    assert_equal(result, True)
-                    log.info("flow number = %d is added",index+1)
-                    if index % 100 == 0:
-                        cpu_usage = scale().get_system_cpu_usage()
-                        log.info('CPU usage is %s for multicast group entries %s'%(cpu_usage,index+1))
-                        time.sleep(1)
-                    def mac_recv_task():
-                        def recv_cb(pkt):
-                            log_test.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src , pkt.dst))
-                            result = True
-                        sniff(count=2, timeout=5,
-                                      lfilter = lambda p: p.src == ingress_mac and p.dst == egress_mac                                                         ,prn = recv_cb, iface = scale().port_map[egress])
-                    t = threading.Thread(target = mac_recv_task)
-                    t.start()
-                    L2 = Ether(src = ingress_mac, dst = egress_mac)
-                    pkt = L2/IP()
-                    log_test.info('Sending packets to verify if flows are correct')
-                    sendp(pkt, count=50, iface = scale().port_map[ingress])
-                    t.join()
-		    assert_equal(result, True)
-                cpu_usage2 = self.get_system_cpu_usage()
-                log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
-            except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0,scale_flow_entries, df)
-        return df
-
-
-    @deferred(1000)
-    def test_scale_adding_acl_rules_to_deny_matching_destination_tcp_port_traffic(self,count=10000):
-	"""
-	Test-Method:
-	1. Add 10000 (Adjust number as per test requirement)acl deny rules with varying tcp port number
-	2. Send data traffic matching flow entries
-	3. Verify onos drops data traffic properly
-	"""
-        df = defer.Deferred()
-        def scale_acl_rules(df):
-		    acl_rule = ACLTest()
-		    ingress = self.ingress_iface
-            egress = self.CURRENT_PORT_NUM
-			status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-            self.CURRENT_PORT_NUM += 1
-            time.sleep(5)
-			assert_equal(status, True)
-			srcMac = '00:00:00:00:00:11'
-            dstMac = host_ip_mac[0][1]
-            scale().acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-			try:
-                for index in range(0,count):
-				    status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-                    self.CURRENT_PORT_NUM += 1
-                    src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_port = random.randint(1024,65535)
-                    log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
-                    status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
-                    assert_equal(status, True)
-					self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp = src_ip, dstIp = dst_ip,ingress =ingress, egress = egress, ip_proto = 'TCP',positive_test = False)
-					scale().acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-		    except Exception as error:
-                log.info('Got unexpected error %s'%error)
-				self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_vsg_vms, df)
-        return df
-
-    @deferred(1000)
-    def test_scale_adding_acl_rules_to_allow_src_and_dst_ip_matching_traffic_validating_cpu_usage(self,count=10000):
-	"""
-	Test-Method:
-	1. Grep system usage before starting test case
-	2. Configure 10000(As per test requirement) acl rules in onos
-	3. Verify traffic test for all 10000 acl rules configured
-	4. Grep system usage again now
-	"""
-        df = defer.Deferred()
-        def scale_acl_rules(df):
-            cpu_usage1 = self.get_system_cpu_usage()
-		    ingress = self.ingress_iface
-            egress = self.CURRENT_PORT_NUM
-			status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-            self.CURRENT_PORT_NUM += 1
-            time.sleep(5)
-			assert_equal(status, True)
-			srcMac = '00:00:00:00:00:11'
-            dstMac = host_ip_mac[0][1]
-            self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-            acl_rule = ACLTest()
-			try:
-                for index in range(0,count):
-				    status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-                    self.CURRENT_PORT_NUM += 1
-                    src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_port = random.randint(1024,65535)
-                    log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
-                    status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
-                    assert_equal(status, True)
-					self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 456)
-                    if index % 100 == 0:
-                        cpu_usage = self.get_system_cpu_usage()
-                        log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
-                        time.sleep(1)
-				    self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-			except Exception as error:
-                log.info('Got unexpected error %s'%error)
-				self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-                raise
-            cpu_usage2 = self.get_system_cpu_usage()
-            log.info('system cpu usage before flows added = %f and after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
-            df.callback(0)
-        reactor.callLater(0, scale_acl_rules, df)
-        return df
-
-    @deferred(1000)
-    def test_scale_adding_and_deleting_acl_rules_to_allow_src_and_dst_ip_matching_traffic(self,count=10000):
-        """
-	Test-Method:
-	1. Add 10000 (Number as per requirement)acl rules to allow source and destinaiton IP matching traffic
-	2. Send acl rules matching traffic
-	3. Delete all the added acl rules
-	"""
-        df = defer.Deferred()
-        def scale_acl_rules(df):
-		    ingress = self.ingress_iface
-            egress = self.CURRENT_PORT_NUM
-			status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-            self.CURRENT_PORT_NUM += 1
-            time.sleep(5)
-			assert_equal(status, True)
-			srcMac = '00:00:00:00:00:11'
-            dstMac = host_ip_mac[0][1]
-            self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-            acl_rule = ACLTest()
-			try:
-                for index in range(0,count):
-				    status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-                    self.CURRENT_PORT_NUM += 1
-                    src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_port = random.randint(1024,65535)
-                    log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
-                    status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
-                    assert_equal(status, True)
-					self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 456)
-                result = acl_rule.get_acl_rules()
-                result = result.json()['aclRules']
-                for acl in result:
-                    acl_rule.remove_acl_rule(acl['id'])
-                    log.info('removed acl with Id --> %s'%acl['id'])
-				self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-			except Exception as error:
-                log.info('Got unexpected error %s'%error)
-				self.acl_hosts_remove(egress_iface_count = 1,  egress_iface_num = egress)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_acl_rules, df)
-        return df
-
-    @deferred(1000)
-    def test_scale_adding_acl_rules_to_deny_src_and_dst_ip_matching_traffic_toggling_acl_app(self,count=20000):
-	"""
-	Test-Method:
-	1. Add 20000 (Number as test requirement)acl rules to allow source and destinaiton IP matching traffic
-	2. Send acl rules matching traffic
-	3. Verify onos drops the traffic as the rule is deny type
-	4. Deactivate the acl app in onos
-	4. Verify now onos forwards the traffic
-	"""
-        df = defer.Deferred()
-        def scale_acl_rules(df):
-		    ingress = self.ingress_iface
-            egress = self.CURRENT_PORT_NUM
-			status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-            self.CURRENT_PORT_NUM += 1
-            time.sleep(5)
-			assert_equal(status, True)
-			srcMac = '00:00:00:00:00:11'
-            dstMac = host_ip_mac[0][1]
-            self.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1,  egress_iface_num = egress )
-            acl_rule = ACLTest()
-            try:
-                for index in range(0,count):
-				    status,code,host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.HOST_DST_IP)
-                    self.CURRENT_PORT_NUM += 1
-                    src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_port = random.randint(1024,65535)
-                    log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
-                    status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'deny')
-                    assert_equal(status, True)
-					self.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.ACL_SRC_IP, dstIp = self.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', dstPortNum = 456)
-                OnosCtrl(cls.acl_app).deactivate()
-                time.sleep(3)
-			except Exception as error:
-                log.info('Got unexpected error %s'%error)
-                raise
-            df.callback(0)
-        reactor.callLater(0, scale_acl_rules, df)
-        return df
-
-    @deferred(1000)
-    def test_scale_adding_igmp_and_acl_with_flow_entries_and_check_cpu_usage(self,igmp_groups=1300, flows_count=10000):
-	"""
-	Test-Method:
-	1. Add igmp and flow entries in onos
-	2. Send igmp joins for corresponding igmp entries
-	3. Send multicast data traffic to registered igmp groups
-	3. Verify onos forwards the traffic
-	4. Send traffic matching the flow entries
-	4. Verify onos forwards the traffic
-	"""
-        df = defer.Deferred()
-        def scale_igmp_acl_flows(df):
-            cpu_usage1 = self.get_system_cpu_usage()
-            egress = 1
-            ingress = 2
-            egress_mac = '00:00:00:00:01:01'
-            ingress_mac = '02:00:00:00:00:00'
-            acl_rule = ACLTest()
-            OnosCtrl(self.igmp_app).activate()
-            groups = self.generate_random_multicast_ip_addresses(count = igmp_groups)
-            sources = self.generate_random_unicast_ip_addresses(count = igmp_groups)
-            self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-            for index in range(igmp_groups):
-                self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                         iface = self.V_INF1)
-                status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                assert_equal(status, True)
-                log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-            for index in range(flows_count):
-                src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                log.info('adding acl rule = %d with src ip = %s, dst ip = %s '%(index+1, src_ip,dst_ip))
-                status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip,action = 'allow')
-                assert_equal(status, True)
-                ingress_mac = self.next_mac(ingress_mac)
-                flow = OnosFlowCtrl(deviceId = self.device_id,
-                        egressPort = egress + self.port_offset,
-                        ingressPort = ingress + self.port_offset,
-                        ethSrc = ingress_mac,
-                        ethDst = egress_mac)
-                result = flow.addFlow()
-                assert_equal(result, True)
-                log.info("flow number = %d is added",index+1)
-                if index % 200 == 0:
-                    cpu_usage = self.get_system_cpu_usage()
-                    log.info('CPU usage is %s for acl rule number %s'%(cpu_usage,index+1))
-                    time.sleep(1)
-            cpu_usage2 = self.get_system_cpu_usage()
-            log.info('system cpu usage before flows added = %f, after %d flows added = %f'%(cpu_usage1,count,cpu_usage2))
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_acl_flows, df)
-        return df
-
-    @deferred(1000)
-    def test_scale_adding_igmp_acl_and_flow_entries_and_simultaneously_toggling_app(self,igmp_groups=1300, flows_count=10000):
-	"""
-	Test-Method:
-	1. Add igmp, acl and flow entries in onos
-	2. Send igmp joins for corresponding igmp entries
-	3. Send multicast data traffic to registered igmp groups
-	3. Verify onos forwards the traffic
-	4. Send traffic matching the flow entries
-	4. Verify onos forwards the traffic
-	5. Send traffic matching acl rules
-	6. Verify onos forwards the traffic
-	"""
-        df = defer.Deferred()
-        def scale_igmp_acl_flows(df):
-            cpu_usage1 = self.get_system_cpu_usage()
-            def adding_igmp_entries():
-                OnosCtrl(self.igmp_app).activate()
-                groups = self.generate_random_multicast_ip_addresses(count = igmp_groups)
-                sources = self.generate_random_unicast_ip_addresses(count = igmp_groups)
-                self.onos_ssm_table_load(groups,src_list=sources,flag=True)
-                for index in range(igmp_groups):
-                    self.send_igmp_join(groups = [groups[index]], src_list = [sources[index]],record_type = IGMP_V3_GR_TYPE_INCLUDE,
-                                          iface = self.V_INF1)
-                    status = self.verify_igmp_data_traffic(groups[index],intf=self.V_INF1,source=sources[index])
-                    assert_equal(status, True)
-                    log_test.info('data received for group %s from source %s - %d'%(groups[index],sources[index],index))
-            def adding_flow_entries():
-                egress = 1
-                ingress = 2
-                egress_mac = '00:00:00:00:01:01'
-                ingress_mac = '02:00:00:00:00:00'
-                for index in range(flows_count):
-                    ingress_mac = self.next_mac(ingress_mac)
-                    flow = OnosFlowCtrl(deviceId = self.device_id,
-                        egressPort = egress + self.port_offset,
-                        ingressPort = ingress + self.port_offset,
-                        ethSrc = ingress_mac,
-                        ethDst = egress_mac)
-                    result = flow.addFlow()
-                    assert_equal(result, True)
-                    log.info("flow number = %d is added",index+1)
-            def adding_acl_entries():
-                OnosCtrl(self.acl_app).activate()
-                for index in range(flows_count):
-                    src_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_ip =  self.generate_random_unicast_ip_addresses(count=1)[0]+'/32'
-                    dst_port = random.randint(1024,65535)
-                    log.info('adding acl rule = %d with src ip = %s, dst ip = %s and dst tcp port = %d'%(index+1, src_ip,dst_ip,dst_port))
-                    status,code = acl_rule.adding_acl_rule('v4', srcIp=src_ip, dstIp = dst_ip, ipProto ='TCP', dstTpPort =dst_port, action = 'deny')
-                    assert_equal(status, True)
-            igmp_thread  = threading.Thread(target = adding_igmp_entries)
-            flows_thread  = threading.Thread(target = adding_flow_entries)
-            acl_thread  = threading.Thread(target = adding_acl_entries)
-            igmp_thread.start()
-            flows_thread.start()
-            acl_thread.start()
-            time.sleep(1)
-            igmp_thread.join()
-            flows_thread.join()
-            acl_thread.join()
-            cpu_usage2 = self.get_system_cpu_usage()
-            OnosCtrl(self.igmp_app).deactivate()
-            OnosCtrl(self.acl_app).deactivate()
-            cpu_usage3 = self.get_system_cpu_usage()
-            log.info('cpu usage before test start = %f, after igmp,flow and acl entries loaded = %f and after the apps deactivated = %f'%(cpu_usage1,cpu_usage2,cpu_usage3))
-            OnosCtrl(self.igmp_app).activate()
-            OnosCtrl(self.acl_app).activate()
-            df.callback(0)
-        reactor.callLater(0, scale_igmp_acl_flows, df)
-        return df
-
-    #tested with 100 routes on CiaB
-    @deferred(1000)
-    def test_scale_for_vrouter_with_large_number_of_routes_and_peers(self):
-	"""
-	Test-Method:
-	1. Add 100000 routes with 100 pairs in quagga(Change scale test number as per test requirement)
-	2. Verify routes pushed to onos  from quagga
-	3. Send traffic destined  the routes added
-	3. Verify onos forwards the traffic
-	"""
-        scale().vrouter_setup()
-        df = defer.Deferred()
-        def scale_vrouter_routes(df):
-	    try:
-                res = scale().vrouter_network_verify(10000, peers = 100)
-                assert_equal(res, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		raise
-            df.callback(0)
-        reactor.callLater(0, scale_vrouter_routes, df)
-        return df
-
-    #tested with 100 subscribers on CiaB
-    @deferred(1800)
-    def test_scale_of_eap_tls_with_huge_number_of_sessions_using_diff_mac(self):
-	"""
-	Test-Method:
-	1. Simulate eap authentication requests for 5000 users(Adjust user number as per requirement)
-	2. Verify authentication is succes for all 5000 users
-	"""
-	OnosCtrl('org.opencord.aaa').activate()
-        df = defer.Deferred()
-        def eap_tls_5k_with_diff_mac(df):
-	    try:
-                for i in xrange(5000):
-                    tls = TLSAuthTest(src_mac = 'random')
-                    tls.runTest()
-                    log_test.info('Authentication successfull for user %d'%i)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		raise
-            df.callback(0)
-        reactor.callLater(0, eap_tls_5k_with_diff_mac, df)
-        return df
-
-    #tested with 100 subscribers on CiaB
-    @deferred(1800)
-    def test_scale_of_eap_tls_with_huge_number_of_sessions_using_diff_mac_with_aaa_app_deactivation_and_activation(self):
-	"""
-	Test-Method:
-	1. Simulate eap authentication requests for 5000 users(Adjust user number as per requirement)
-	2. Verify authentication is succes for all 5000 users
-	3. Deactivate and activate the aaa app in onos
-	4. Simulate eap authentication requests for 5000 users
-	5. Verify authentication is succes for all 5000 users
-	"""
-	OnosCtrl('org.opencord.aaa').activate()
-	df = defer.Deferred()
-        def eap_tls_5k_with_diff_mac(df):
-	    try:
-		for i in xrange(5000):
-                    tls = TLSAuthTest(src_mac = 'random')
-                    tls.runTest()
-                    log_test.info('Authentication successfull for user %d'%i)
-	        OnosCtrl('org.opencord.aaa').deactivate()
-	        time.sleep(2)
-	        OnosCtrl('org.opencord.aaa').activate()
-                for i in xrange(100):
-                    tls = TLSAuthTest(src_mac = 'random')
-                    tls.runTest()
-                    log_test.info('Authentication successfull for user %d'%i)
-		OnosCtrl('org.opencord.aaa').activate()
-            except Exception as error:
-                log.info('Got Unexpected  error %s'%error)
-		OnosCtrl('org.opencord.aaa').activate()
-                raise
-            df.callback(0)
-        reactor.callLater(0, eap_tls_5k_with_diff_mac, df)
-        return df
-
-    #tested with 10 subscribers on CiaB
-    @deferred(1800)
-    def test_scale_for_cord_subscribers_authentication_with_valid_and_invalid_certificates_and_channel_surfing(self):
-	"""
-	Test-Method:
-	1. Simulate 5000 subscribers to get authentication access(Adjust cord subscribers according to test)
-	2. Send igmp joins from all the subcribers
-	3. Verify multicast traffic received to all 5000 subscribers
-	"""
-	scale().subscriber_setup()
-        df = defer.Deferred()
-        def cordsub_auth_invalid_cert(df):
-            num_subscribers = 2
-            num_channels = 1
-	    try:
-                test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (scale().tls_invalid_cert, scale().dhcp_verify, scale().igmp_verify),
-                                                        port_list = scale().generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'half')
-                assert_equal(test_status, True)
-            except Exception as error:
-                log.info('Got Unexpected  error %s'%error)
-		raise
-	    finally:
-                scale().subscriber_teardown()
-            df.callback(0)
-        reactor.callLater(0, cordsub_auth_invalid_cert, df)
-        return df
-
-    #tested with 10 subscribers on CiaB
-    @deferred(1800)
-    def test_scale_for_cord_subscribers_with_igmp_join_and_jump_for_multiple_channels(self):
-	"""
-        Test-Method:
-        1. Simulate 5000 subscribers(Adjust number as per test)
-        2. Send igmp joins from all the subcribers
-        3. Verify multicast traffic received to all 5000 subscribers
-        """
-        scale().subscriber_setup()
-        df = defer.Deferred()
-        def cordsub_igmp_join_jump(df):
-            num_subscribers = 5000
-            num_channels = 1500
-	    try:
-	        test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (scale().tls_verify, scale().dhcp_jump_verify, scale().igmp_jump_verify),
-                                                    port_list = scale().generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-                assert_equal(test_status, True)
-            except Exception as error:
-                log.info('Got Unexpected  error %s'%error)
-		raise
-            finally:
-                scale().subscriber_teardown()
-            df.callback(0)
-        reactor.callLater(0, cordsub_igmp_join_jump, df)
-	return df
-
-    #tested with 10 subscribers on CiaB
-    @deferred(1800)
-    def test_scale_for_cord_subscribers_authentication_with_valid_and_non_ca_authorized_certificates_and_channel_surfing(self):
-	"""
-        Test-Method:
-        1. Simulate 10000 subscribers to get authentication access(Adjust number as per test)
-        2. Send igmp joins from all the subcribers
-        3. Verify multicast traffic received to all 10000 subscribers
-        """
-	scale().subscriber_setup()
-        df = defer.Deferred()
-        def cordsub_auth_valid_cert(df):
-            num_subscribers = 10000
-            num_channels = 1
-	    try:
-                test_status = scale().subscriber_join_verify(num_subscribers = num_subscribers,
-                                                 num_channels = num_channels,
-                                                 cbs = (scale().tls_non_ca_authrized_cert, scale().dhcp_verify, scale().igmp_verify),
-                                                 port_list = scale().generate_port_list(num_subscribers, num_channels),
-                                                 negative_subscriber_auth = 'onethird')
-                assert_equal(test_status, True)
-            except Exception as error:
-                log.info('Got Unexpected  error %s'%error)
-		raise
-            finally:
-                scale().subscriber_teardown()
-            df.callback(0)
-        reactor.callLater(0, cordsub_auth_valid_cert, df)
-        return df
diff --git a/src/test/scapy/fields.py b/src/test/scapy/fields.py
deleted file mode 100644
index 1b16e6e..0000000
--- a/src/test/scapy/fields.py
+++ /dev/null
@@ -1,1042 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## This file is part of Scapy
-## See http://www.secdev.org/projects/scapy for more informations
-## Copyright (C) Philippe Biondi <phil@secdev.org>
-## This program is published under a GPLv2 license
-
-"""
-Fields: basic data structures that make up parts of packets.
-"""
-
-import struct,copy,socket
-from config import conf
-from volatile import *
-from data import *
-from utils import *
-from base_classes import BasePacket,Gen,Net
-
-
-############
-## Fields ##
-############
-
-class Field:
-    """For more informations on how this work, please refer to
-       http://www.secdev.org/projects/scapy/files/scapydoc.pdf
-       chapter ``Adding a New Field''"""
-    islist=0
-    holds_packets=0
-    def __init__(self, name, default, fmt="H"):
-        self.name = name
-        if fmt[0] in "@=<>!":
-            self.fmt = fmt
-        else:
-            self.fmt = "!"+fmt
-        self.default = self.any2i(None,default)
-        self.sz = struct.calcsize(self.fmt)
-        self.owners = []
-
-    def register_owner(self, cls):
-        self.owners.append(cls)
-
-    def i2len(self, pkt, x):
-        """Convert internal value to a length usable by a FieldLenField"""
-        return self.sz
-    def i2count(self, pkt, x):
-        """Convert internal value to a number of elements usable by a FieldLenField.
-        Always 1 except for list fields"""
-        return 1
-    def h2i(self, pkt, x):
-        """Convert human value to internal value"""
-        return x
-    def i2h(self, pkt, x):
-        """Convert internal value to human value"""
-        return x
-    def m2i(self, pkt, x):
-        """Convert machine value to internal value"""
-        return x
-    def i2m(self, pkt, x):
-        """Convert internal value to machine value"""
-        if x is None:
-            x = 0
-        return x
-    def any2i(self, pkt, x):
-        """Try to understand the most input values possible and make an internal value from them"""
-        return self.h2i(pkt, x)
-    def i2repr(self, pkt, x):
-        """Convert internal value to a nice representation"""
-        return repr(self.i2h(pkt,x))
-    def addfield(self, pkt, s, val):
-        """Add an internal value  to a string"""
-        return s+struct.pack(self.fmt, self.i2m(pkt,val))
-    def getfield(self, pkt, s):
-        """Extract an internal value from a string"""
-        return  s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, s[:self.sz])[0])
-    def do_copy(self, x):
-        if hasattr(x, "copy"):
-            return x.copy()
-        if type(x) is list:
-            x = x[:]
-            for i in xrange(len(x)):
-                if isinstance(x[i], BasePacket):
-                    x[i] = x[i].copy()
-        return x
-    def __repr__(self):
-        return "<Field (%s).%s>" % (",".join(x.__name__ for x in self.owners),self.name)
-    def copy(self):
-        return copy.deepcopy(self)
-    def randval(self):
-        """Return a volatile object whose value is both random and suitable for this field"""
-        fmtt = self.fmt[-1]
-        if fmtt in "BHIQ":
-            return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]()
-        elif fmtt == "s":
-            if self.fmt[0] in "0123456789":
-                l = int(self.fmt[:-1])
-            else:
-                l = int(self.fmt[1:-1])
-            return RandBin(l)
-        else:
-            warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
-
-
-
-
-class Emph:
-    fld = ""
-    def __init__(self, fld):
-        self.fld = fld
-    def __getattr__(self, attr):
-        return getattr(self.fld,attr)
-    def __hash__(self):
-        return hash(self.fld)
-    def __eq__(self, other):
-        return self.fld == other
-
-
-class ActionField:
-    _fld = None
-    def __init__(self, fld, action_method, **kargs):
-        self._fld = fld
-        self._action_method = action_method
-        self._privdata = kargs
-    def any2i(self, pkt, val):
-        getattr(pkt, self._action_method)(val, self._fld, **self._privdata)
-        return getattr(self._fld, "any2i")(pkt, val)
-    def __getattr__(self, attr):
-        return getattr(self._fld,attr)
-
-
-class ConditionalField:
-    fld = None
-    def __init__(self, fld, cond):
-        self.fld = fld
-        self.cond = cond
-    def _evalcond(self,pkt):
-        return self.cond(pkt)
-
-    def getfield(self, pkt, s):
-        if self._evalcond(pkt):
-            return self.fld.getfield(pkt,s)
-        else:
-            return s,None
-
-    def addfield(self, pkt, s, val):
-        if self._evalcond(pkt):
-            return self.fld.addfield(pkt,s,val)
-        else:
-            return s
-    def __getattr__(self, attr):
-        return getattr(self.fld,attr)
-
-
-class PadField:
-    """Add bytes after the proxified field so that it ends at the specified
-       alignment from its begining"""
-    _fld = None
-    def __init__(self, fld, align, padwith=None):
-        self._fld = fld
-        self._align = align
-        self._padwith = padwith or ""
-
-    def padlen(self, flen):
-        return -flen%self._align
-
-    def getfield(self, pkt, s):
-        remain,val = self._fld.getfield(pkt,s)
-        padlen = self.padlen(len(s)-len(remain))
-        return remain[padlen:], val
-
-    def addfield(self, pkt, s, val):
-        sval = self._fld.addfield(pkt, "", val)
-        return s+sval+struct.pack("%is" % (self.padlen(len(sval))), self._padwith)
-
-    def __getattr__(self, attr):
-        return getattr(self._fld,attr)
-
-
-class MACField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "6s")
-    def i2m(self, pkt, x):
-        if x is None:
-            return "\0\0\0\0\0\0"
-        return mac2str(x)
-    def m2i(self, pkt, x):
-        return str2mac(x)
-    def any2i(self, pkt, x):
-        if type(x) is str and len(x) is 6:
-            x = self.m2i(pkt, x)
-        return x
-    def i2repr(self, pkt, x):
-        x = self.i2h(pkt, x)
-        if self in conf.resolve:
-            x = conf.manufdb._resolve_MAC(x)
-        return x
-    def randval(self):
-        return RandMAC()
-
-
-class IPField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "4s")
-    def h2i(self, pkt, x):
-        if type(x) is str:
-            try:
-                inet_aton(x)
-            except socket.error:
-                x = Net(x)
-        elif type(x) is list:
-            x = [self.h2i(pkt, n) for n in x]
-        return x
-    def resolve(self, x):
-        if self in conf.resolve:
-            try:
-                ret = socket.gethostbyaddr(x)[0]
-            except:
-                pass
-            else:
-                if ret:
-                    return ret
-        return x
-    def i2m(self, pkt, x):
-        return inet_aton(x)
-    def m2i(self, pkt, x):
-        return inet_ntoa(x)
-    def any2i(self, pkt, x):
-        return self.h2i(pkt,x)
-    def i2repr(self, pkt, x):
-        return self.resolve(self.i2h(pkt, x))
-    def randval(self):
-        return RandIP()
-
-class SourceIPField(IPField):
-    def __init__(self, name, dstname):
-        IPField.__init__(self, name, None)
-        self.dstname = dstname
-    def i2m(self, pkt, x):
-        if x is None:
-            iff,x,gw = pkt.route()
-            if x is None:
-                x = "0.0.0.0"
-        return IPField.i2m(self, pkt, x)
-    def i2h(self, pkt, x):
-        if x is None:
-            dst=getattr(pkt,self.dstname)
-            if isinstance(dst,Gen):
-                r = map(conf.route.route, dst)
-                r.sort()
-                if r[0] != r[-1]:
-                    warning("More than one possible route for %s"%repr(dst))
-                iff,x,gw = r[0]
-            else:
-                iff,x,gw = conf.route.route(dst)
-        return IPField.i2h(self, pkt, x)
-
-
-
-
-class ByteField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "B")
-
-class XByteField(ByteField):
-    def i2repr(self, pkt, x):
-        return lhex(self.i2h(pkt, x))
-
-class OByteField(ByteField):
-    def i2repr(self, pkt, x):
-        return "%03o"%self.i2h(pkt, x)
-
-class X3BytesField(XByteField):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "!I")
-    def addfield(self, pkt, s, val):
-        return s+struct.pack(self.fmt, self.i2m(pkt,val))[1:4]
-    def getfield(self, pkt, s):
-        return  s[3:], self.m2i(pkt, struct.unpack(self.fmt, "\x00"+s[:3])[0])
-
-class ThreeBytesField(X3BytesField, ByteField):
-    def i2repr(self, pkt, x):
-        return ByteField.i2repr(self, pkt, x)
-
-class ShortField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "H")
-
-class SignedShortField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "h")
-
-class LEShortField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "<H")
-
-class XShortField(ShortField):
-    def i2repr(self, pkt, x):
-        return lhex(self.i2h(pkt, x))
-
-
-class IntField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "I")
-
-class SignedIntField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "i")
-    def randval(self):
-        return RandSInt()
-
-class LEIntField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "<I")
-
-class LESignedIntField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "<i")
-    def randval(self):
-        return RandSInt()
-
-class XIntField(IntField):
-    def i2repr(self, pkt, x):
-        return lhex(self.i2h(pkt, x))
-
-
-class LongField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "Q")
-
-class XLongField(LongField):
-    def i2repr(self, pkt, x):
-        return lhex(self.i2h(pkt, x))
-
-class IEEEFloatField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "f")
-
-class IEEEDoubleField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "d")
-
-
-class StrField(Field):
-    def __init__(self, name, default, fmt="H", remain=0):
-        Field.__init__(self,name,default,fmt)
-        self.remain = remain
-    def i2len(self, pkt, i):
-        return len(i)
-    def i2m(self, pkt, x):
-        if x is None:
-            x = ""
-        elif type(x) is not str:
-            x=str(x)
-        return x
-    def addfield(self, pkt, s, val):
-        return s+self.i2m(pkt, val)
-    def getfield(self, pkt, s):
-        if self.remain == 0:
-            return "",self.m2i(pkt, s)
-        else:
-            return s[-self.remain:],self.m2i(pkt, s[:-self.remain])
-    def randval(self):
-        return RandBin(RandNum(0,1200))
-
-class PacketField(StrField):
-    holds_packets=1
-    def __init__(self, name, default, cls, remain=0):
-        StrField.__init__(self, name, default, remain=remain)
-        self.cls = cls
-    def i2m(self, pkt, i):
-        return str(i)
-    def m2i(self, pkt, m):
-        return self.cls(m)
-    def getfield(self, pkt, s):
-        i = self.m2i(pkt, s)
-        remain = ""
-        if conf.padding_layer in i:
-            r = i[conf.padding_layer]
-            del(r.underlayer.payload)
-            remain = r.load
-        return remain,i
-
-class PacketLenField(PacketField):
-    def __init__(self, name, default, cls, length_from=None):
-        PacketField.__init__(self, name, default, cls)
-        self.length_from = length_from
-    def getfield(self, pkt, s):
-        l = self.length_from(pkt)
-        try:
-            i = self.m2i(pkt, s[:l])
-        except Exception:
-            if conf.debug_dissector:
-                raise
-            i = conf.raw_layer(load=s[:l])
-        return s[l:],i
-
-
-class PacketListField(PacketField):
-    islist = 1
-    def __init__(self, name, default, cls, count_from=None, length_from=None):
-        if default is None:
-            default = []  # Create a new list for each instance
-        PacketField.__init__(self, name, default, cls)
-        self.count_from = count_from
-        self.length_from = length_from
-
-
-    def any2i(self, pkt, x):
-        if type(x) is not list:
-            return [x]
-        else:
-            return x
-    def i2count(self, pkt, val):
-        if type(val) is list:
-            return len(val)
-        return 1
-    def i2len(self, pkt, val):
-        return sum( len(p) for p in val )
-    def do_copy(self, x):
-        if x is None:
-            return None
-        else:
-            return [p if isinstance(p, basestring) else p.copy() for p in x]
-    def getfield(self, pkt, s):
-        c = l = None
-        if self.length_from is not None:
-            l = self.length_from(pkt)
-        elif self.count_from is not None:
-            c = self.count_from(pkt)
-
-        lst = []
-        ret = ""
-        remain = s
-        if l is not None:
-            remain,ret = s[:l],s[l:]
-        while remain:
-            if c is not None:
-                if c <= 0:
-                    break
-                c -= 1
-            try:
-                p = self.m2i(pkt,remain)
-            except Exception:
-                if conf.debug_dissector:
-                    raise
-                p = conf.raw_layer(load=remain)
-                remain = ""
-            else:
-                if conf.padding_layer in p:
-                    pad = p[conf.padding_layer]
-                    remain = pad.load
-                    del(pad.underlayer.payload)
-                else:
-                    remain = ""
-            lst.append(p)
-        return remain+ret,lst
-    def addfield(self, pkt, s, val):
-        return s+"".join(map(str, val))
-
-
-class StrFixedLenField(StrField):
-    def __init__(self, name, default, length=None, length_from=None):
-        StrField.__init__(self, name, default)
-        self.length_from  = length_from
-        if length is not None:
-            self.length_from = lambda pkt,length=length: length
-    def i2repr(self, pkt, v):
-        if type(v) is str:
-            v = v.rstrip("\0")
-        return repr(v)
-    def getfield(self, pkt, s):
-        l = self.length_from(pkt)
-        return s[l:], self.m2i(pkt,s[:l])
-    def addfield(self, pkt, s, val):
-        l = self.length_from(pkt)
-        return s+struct.pack("%is"%l,self.i2m(pkt, val))
-    def randval(self):
-        try:
-            l = self.length_from(None)
-        except:
-            l = RandNum(0,200)
-        return RandBin(l)
-
-class StrFixedLenEnumField(StrFixedLenField):
-    def __init__(self, name, default, length=None, enum=None, length_from=None):
-        StrFixedLenField.__init__(self, name, default, length=length, length_from=length_from)
-        self.enum = enum
-    def i2repr(self, pkt, v):
-        r = v.rstrip("\0")
-        rr = repr(r)
-        if v in self.enum:
-            rr = "%s (%s)" % (rr, self.enum[v])
-        elif r in self.enum:
-            rr = "%s (%s)" % (rr, self.enum[r])
-        return rr
-
-class NetBIOSNameField(StrFixedLenField):
-    def __init__(self, name, default, length=31):
-        StrFixedLenField.__init__(self, name, default, length)
-    def i2m(self, pkt, x):
-        l = self.length_from(pkt)/2
-        if x is None:
-            x = ""
-        x += " "*(l)
-        x = x[:l]
-        x = "".join(map(lambda x: chr(0x41+(ord(x)>>4))+chr(0x41+(ord(x)&0xf)), x))
-        x = " "+x
-        return x
-    def m2i(self, pkt, x):
-        x = x.strip("\x00").strip(" ")
-        return "".join(map(lambda x,y: chr((((ord(x)-1)&0xf)<<4)+((ord(y)-1)&0xf)), x[::2],x[1::2]))
-
-class StrLenField(StrField):
-    def __init__(self, name, default, fld=None, length_from=None):
-        StrField.__init__(self, name, default)
-        self.length_from = length_from
-    def getfield(self, pkt, s):
-        l = self.length_from(pkt)
-        return s[l:], self.m2i(pkt,s[:l])
-
-class BoundStrLenField(StrLenField):
-    def __init__(self,name, default, minlen= 0, maxlen= 255, fld=None, length_from=None):
-        StrLenField.__init__(self, name, default, fld, length_from)
-        self.minlen= minlen
-        self.maxlen= maxlen
-
-    def randval(self):
-        return RandBin(RandNum(self.minlen, self.maxlen))
-
-class FieldListField(Field):
-    islist=1
-    def __init__(self, name, default, field, length_from=None, count_from=None):
-        if default is None:
-            default = []  # Create a new list for each instance
-        self.field = field
-        Field.__init__(self, name, default)
-        self.count_from = count_from
-        self.length_from = length_from
-
-    def i2count(self, pkt, val):
-        if type(val) is list:
-            return len(val)
-        return 1
-    def i2len(self, pkt, val):
-        return sum( self.field.i2len(pkt,v) for v in val )
-
-    def i2m(self, pkt, val):
-        if val is None:
-            val = []
-        return val
-    def any2i(self, pkt, x):
-        if type(x) is not list:
-            return [self.field.any2i(pkt, x)]
-        else:
-            return map(lambda e, pkt=pkt: self.field.any2i(pkt, e), x)
-    def i2repr(self, pkt, x):
-        return map(lambda e, pkt=pkt: self.field.i2repr(pkt,e), x)
-    def addfield(self, pkt, s, val):
-        val = self.i2m(pkt, val)
-        for v in val:
-            s = self.field.addfield(pkt, s, v)
-        return s
-    def getfield(self, pkt, s):
-        c = l = None
-        if self.length_from is not None:
-            l = self.length_from(pkt)
-        elif self.count_from is not None:
-            c = self.count_from(pkt)
-
-        val = []
-        ret=""
-        if l is not None:
-            s,ret = s[:l],s[l:]
-
-        while s:
-            if c is not None:
-                if c <= 0:
-                    break
-                c -= 1
-            s,v = self.field.getfield(pkt, s)
-            val.append(v)
-        return s+ret, val
-
-class FieldLenField(Field):
-    def __init__(self, name, default,  length_of=None, fmt = "H", count_of=None, adjust=lambda pkt,x:x, fld=None):
-        Field.__init__(self, name, default, fmt)
-        self.length_of=length_of
-        self.count_of=count_of
-        self.adjust=adjust
-        if fld is not None:
-            FIELD_LENGTH_MANAGEMENT_DEPRECATION(self.__class__.__name__)
-            self.length_of = fld
-    def i2m(self, pkt, x):
-        if x is None:
-            if self.length_of is not None:
-                fld,fval = pkt.getfield_and_val(self.length_of)
-                f = fld.i2len(pkt, fval)
-            else:
-                fld,fval = pkt.getfield_and_val(self.count_of)
-                f = fld.i2count(pkt, fval)
-            x = self.adjust(pkt,f)
-        return x
-
-class StrNullField(StrField):
-    def addfield(self, pkt, s, val):
-        return s+self.i2m(pkt, val)+"\x00"
-    def getfield(self, pkt, s):
-        l = s.find("\x00")
-        if l < 0:
-            #XXX \x00 not found
-            return "",s
-        return s[l+1:],self.m2i(pkt, s[:l])
-    def randval(self):
-        return RandTermString(RandNum(0,1200),"\x00")
-
-class StrStopField(StrField):
-    def __init__(self, name, default, stop, additionnal=0):
-        Field.__init__(self, name, default)
-        self.stop=stop
-        self.additionnal=additionnal
-    def getfield(self, pkt, s):
-        l = s.find(self.stop)
-        if l < 0:
-            return "",s
-#            raise Scapy_Exception,"StrStopField: stop value [%s] not found" %stop
-        l += len(self.stop)+self.additionnal
-        return s[l:],s[:l]
-    def randval(self):
-        return RandTermString(RandNum(0,1200),self.stop)
-
-class LenField(Field):
-    def i2m(self, pkt, x):
-        if x is None:
-            x = len(pkt.payload)
-        return x
-
-class BCDFloatField(Field):
-    def i2m(self, pkt, x):
-        return int(256*x)
-    def m2i(self, pkt, x):
-        return x/256.0
-
-class BitField(Field):
-    def __init__(self, name, default, size):
-        Field.__init__(self, name, default)
-        self.rev = size < 0
-        self.size = abs(size)
-    def reverse(self, val):
-        if self.size == 16:
-            val = socket.ntohs(val)
-        elif self.size == 32:
-            val = socket.ntohl(val)
-        return val
-
-    def addfield(self, pkt, s, val):
-        val = self.i2m(pkt, val)
-        if type(s) is tuple:
-            s,bitsdone,v = s
-        else:
-            bitsdone = 0
-            v = 0
-        if self.rev:
-            val = self.reverse(val)
-        v <<= self.size
-        v |= val & ((1L<<self.size) - 1)
-        bitsdone += self.size
-        while bitsdone >= 8:
-            bitsdone -= 8
-            s = s+struct.pack("!B", v >> bitsdone)
-            v &= (1L<<bitsdone)-1
-        if bitsdone:
-            return s,bitsdone,v
-        else:
-            return s
-    def getfield(self, pkt, s):
-        if type(s) is tuple:
-            s,bn = s
-        else:
-            bn = 0
-        # we don't want to process all the string
-        nb_bytes = (self.size+bn-1)/8 + 1
-        w = s[:nb_bytes]
-
-        # split the substring byte by byte
-        bytes = struct.unpack('!%dB' % nb_bytes , w)
-
-        b = 0L
-        for c in range(nb_bytes):
-            b |= long(bytes[c]) << (nb_bytes-c-1)*8
-
-        # get rid of high order bits
-        b &= (1L << (nb_bytes*8-bn)) - 1
-
-        # remove low order bits
-        b = b >> (nb_bytes*8 - self.size - bn)
-
-        if self.rev:
-            b = self.reverse(b)
-
-        bn += self.size
-        s = s[bn/8:]
-        bn = bn%8
-        b = self.m2i(pkt, b)
-        if bn:
-            return (s,bn),b
-        else:
-            return s,b
-    def randval(self):
-        return RandNum(0,2**self.size-1)
-
-
-class BitFieldLenField(BitField):
-    def __init__(self, name, default, size, length_of=None, count_of=None, adjust=lambda pkt,x:x):
-        BitField.__init__(self, name, default, size)
-        self.length_of=length_of
-        self.count_of=count_of
-        self.adjust=adjust
-    def i2m(self, pkt, x):
-        return FieldLenField.i2m.im_func(self, pkt, x)
-
-
-class XBitField(BitField):
-    def i2repr(self, pkt, x):
-        return lhex(self.i2h(pkt,x))
-
-
-class EnumField(Field):
-    def __init__(self, name, default, enum, fmt = "H"):
-        i2s = self.i2s = {}
-        s2i = self.s2i = {}
-        if type(enum) is list:
-            keys = xrange(len(enum))
-        else:
-            keys = enum.keys()
-        if filter(lambda x: type(x) is str, keys):
-            i2s,s2i = s2i,i2s
-        for k in keys:
-            i2s[k] = enum[k]
-            s2i[enum[k]] = k
-        Field.__init__(self, name, default, fmt)
-
-    def any2i_one(self, pkt, x):
-        if type(x) is str:
-            if (x == 'TLS_2_1') | (x == 'RSA_WITH_AES_512_CBC_SHA'):
-               x = 70
-            else:
-               x = self.s2i[x]
-        return x
-
-    def any2i_one_negative_case(self, pkt, x):
-        if type(x) is str:
-            x = 770
-        return x
-    def i2repr_one(self, pkt, x):
-        if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
-            return self.i2s[x]
-        return repr(x)
-
-    def any2i(self, pkt, x):
-        if type(x) is list:
-            return map(lambda z,pkt=pkt:self.any2i_one(pkt,z), x)
-        else:
-            return self.any2i_one(pkt,x)
-    def i2repr(self, pkt, x):
-        if type(x) is list:
-            return map(lambda z,pkt=pkt:self.i2repr_one(pkt,z), x)
-        else:
-            return self.i2repr_one(pkt,x)
-
-#scapy_obj = EnumField()
-#scapy_obj.any2i_one = scapy_obj.any2i_one_negative_case
-
-class CharEnumField(EnumField):
-    def __init__(self, name, default, enum, fmt = "1s"):
-        EnumField.__init__(self, name, default, enum, fmt)
-        k = self.i2s.keys()
-        if k and len(k[0]) != 1:
-            self.i2s,self.s2i = self.s2i,self.i2s
-    def any2i_one(self, pkt, x):
-        if len(x) != 1:
-            x = self.s2i[x]
-        return x
-
-class BitEnumField(BitField,EnumField):
-    def __init__(self, name, default, size, enum):
-        EnumField.__init__(self, name, default, enum)
-        self.rev = size < 0
-        self.size = abs(size)
-    def any2i(self, pkt, x):
-        return EnumField.any2i(self, pkt, x)
-    def i2repr(self, pkt, x):
-        return EnumField.i2repr(self, pkt, x)
-
-class ShortEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "H")
-
-class LEShortEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "<H")
-
-class ByteEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "B")
-
-class IntEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "I")
-
-class SignedIntEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "i")
-    def randval(self):
-        return RandSInt()
-
-class LEIntEnumField(EnumField):
-    def __init__(self, name, default, enum):
-        EnumField.__init__(self, name, default, enum, "<I")
-
-class XShortEnumField(ShortEnumField):
-    def i2repr_one(self, pkt, x):
-        if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
-            return self.i2s[x]
-        return lhex(x)
-
-class MultiEnumField(EnumField):
-    def __init__(self, name, default, enum, depends_on, fmt = "H"):
-
-        self.depends_on = depends_on
-        self.i2s_multi = enum
-        self.s2i_multi = {}
-        self.s2i_all = {}
-        for m in enum:
-            self.s2i_multi[m] = s2i = {}
-            for k,v in enum[m].iteritems():
-                s2i[v] = k
-                self.s2i_all[v] = k
-        Field.__init__(self, name, default, fmt)
-    def any2i_one(self, pkt, x):
-        if type (x) is str:
-            v = self.depends_on(pkt)
-            if v in self.s2i_multi:
-                s2i = self.s2i_multi[v]
-                if x in s2i:
-                    return s2i[x]
-            return self.s2i_all[x]
-        return x
-    def i2repr_one(self, pkt, x):
-        v = self.depends_on(pkt)
-        if v in self.i2s_multi:
-            return self.i2s_multi[v].get(x,x)
-        return x
-
-class BitMultiEnumField(BitField,MultiEnumField):
-    def __init__(self, name, default, size, enum, depends_on):
-        MultiEnumField.__init__(self, name, default, enum)
-        self.rev = size < 0
-        self.size = abs(size)
-    def any2i(self, pkt, x):
-        return MultiEnumField.any2i(self, pkt, x)
-    def i2repr(self, pkt, x):
-        return MultiEnumField.i2repr(self, pkt, x)
-
-
-class ByteEnumKeysField(ByteEnumField):
-    """ByteEnumField that picks valid values when fuzzed. """
-    def randval(self):
-        return RandEnumKeys(self.i2s)
-
-
-class ShortEnumKeysField(ShortEnumField):
-    """ShortEnumField that picks valid values when fuzzed. """
-    def randval(self):
-        return RandEnumKeys(self.i2s)
-
-
-class IntEnumKeysField(IntEnumField):
-    """IntEnumField that picks valid values when fuzzed. """
-    def randval(self):
-        return RandEnumKeys(self.i2s)
-
-
-# Little endian long field
-class LELongField(Field):
-    def __init__(self, name, default):
-        Field.__init__(self, name, default, "<Q")
-
-# Little endian fixed length field
-class LEFieldLenField(FieldLenField):
-    def __init__(self, name, default,  length_of=None, fmt = "<H", count_of=None, adjust=lambda pkt,x:x, fld=None):
-        FieldLenField.__init__(self, name, default, length_of=length_of, fmt=fmt, count_of=count_of, fld=fld, adjust=adjust)
-
-
-class FlagsField(BitField):
-    def __init__(self, name, default, size, names):
-        self.multi = type(names) is list
-        if self.multi:
-            self.names = map(lambda x:[x], names)
-        else:
-            self.names = names
-        BitField.__init__(self, name, default, size)
-    def any2i(self, pkt, x):
-        if type(x) is str:
-            if self.multi:
-                x = map(lambda y:[y], x.split("+"))
-            y = 0
-            for i in x:
-                y |= 1 << self.names.index(i)
-            x = y
-        return x
-    def i2repr(self, pkt, x):
-        if type(x) is list or type(x) is tuple:
-            return repr(x)
-        if self.multi:
-            r = []
-        else:
-            r = ""
-        i=0
-        while x:
-            if x & 1:
-                r += self.names[i]
-            i += 1
-            x >>= 1
-        if self.multi:
-            r = "+".join(r)
-        return r
-
-
-
-
-class FixedPointField(BitField):
-    def __init__(self, name, default, size, frac_bits=16):
-        self.frac_bits = frac_bits
-        BitField.__init__(self, name, default, size)
-
-    def any2i(self, pkt, val):
-        if val is None:
-            return val
-        ival = int(val)
-        fract = int( (val-ival) * 2**self.frac_bits )
-        return (ival << self.frac_bits) | fract
-
-    def i2h(self, pkt, val):
-        int_part = val >> self.frac_bits
-        frac_part = val & (1L << self.frac_bits) - 1
-        frac_part /= 2.0**self.frac_bits
-        return int_part+frac_part
-    def i2repr(self, pkt, val):
-        return self.i2h(pkt, val)
-
-
-# Base class for IPv4 and IPv6 Prefixes inspired by IPField and IP6Field.
-# Machine values are encoded in a multiple of wordbytes bytes.
-class _IPPrefixFieldBase(Field):
-    def __init__(self, name, default, wordbytes, maxbytes, aton, ntoa, length_from):
-        self.wordbytes= wordbytes
-        self.maxbytes= maxbytes
-        self.aton= aton
-        self.ntoa= ntoa
-        Field.__init__(self, name, default, "%is" % self.maxbytes)
-        self.length_from= length_from
-
-    def _numbytes(self, pfxlen):
-        wbits= self.wordbytes * 8
-        return ((pfxlen + (wbits - 1)) / wbits) * self.wordbytes
-
-    def h2i(self, pkt, x):
-        # "fc00:1::1/64" -> ("fc00:1::1", 64)
-        [pfx,pfxlen]= x.split('/')
-        self.aton(pfx) # check for validity
-        return (pfx, int(pfxlen))
-
-
-    def i2h(self, pkt, x):
-        # ("fc00:1::1", 64) -> "fc00:1::1/64"
-        (pfx,pfxlen)= x
-        return "%s/%i" % (pfx,pfxlen)
-
-    def i2m(self, pkt, x):
-        # ("fc00:1::1", 64) -> ("\xfc\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 64)
-        (pfx,pfxlen)= x
-        s= self.aton(pfx);
-        return (s[:self._numbytes(pfxlen)], pfxlen)
-
-    def m2i(self, pkt, x):
-        # ("\xfc\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 64) -> ("fc00:1::1", 64)
-        (s,pfxlen)= x
-
-        if len(s) < self.maxbytes:
-            s= s + ("\0" * (self.maxbytes - len(s)))
-        return (self.ntoa(s), pfxlen)
-
-    def any2i(self, pkt, x):
-        if x is None:
-            return (self.ntoa("\0"*self.maxbytes), 1)
-
-        return self.h2i(pkt,x)
-
-    def i2len(self, pkt, x):
-        (_,pfxlen)= x
-        return pfxlen
-
-    def addfield(self, pkt, s, val):
-        (rawpfx,pfxlen)= self.i2m(pkt,val)
-        fmt= "!%is" % self._numbytes(pfxlen)
-        return s+struct.pack(fmt, rawpfx)
-
-    def getfield(self, pkt, s):
-        pfxlen= self.length_from(pkt)
-        numbytes= self._numbytes(pfxlen)
-        fmt= "!%is" % numbytes
-        return s[numbytes:], self.m2i(pkt, (struct.unpack(fmt, s[:numbytes])[0], pfxlen))
-
-
-class IPPrefixField(_IPPrefixFieldBase):
-    def __init__(self, name, default, wordbytes=1, length_from= None):
-        _IPPrefixFieldBase.__init__(self, name, default, wordbytes, 4, inet_aton, inet_ntoa, length_from)
-
-
-class IP6PrefixField(_IPPrefixFieldBase):
-    def __init__(self, name, default, wordbytes= 1, length_from= None):
-        _IPPrefixFieldBase.__init__(self, name, default, wordbytes, 16, lambda a: inet_pton(socket.AF_INET6, a), lambda n: inet_ntop(socket.AF_INET6, n), length_from)
-
-#sriptpath_for_dynamic_import = /usr/local/lib/python2.7/dist-packages/scapy/fileds.py
-#sys.path.append(os.path.abspath(scriptpath))
-#import fields *
-
-
diff --git a/src/test/setup/calix_fabric_test_netcfg.json b/src/test/setup/calix_fabric_test_netcfg.json
deleted file mode 100644
index de4defd..0000000
--- a/src/test/setup/calix_fabric_test_netcfg.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
-    "devices": {
-        "of:0000480fcfae6e0a": {
-            "segmentrouting": {
-                "name": "device-480fcfae6e0a",
-                "ipv4NodeSid": 100,
-                "ipv4Loopback": "10.6.0.102",
-                "routerMac": "48:0f:cf:ae:6e:0a",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfaedea8": {
-            "segmentrouting": {
-                "name": "device-480fcfaedea8",
-                "ipv4NodeSid": 101,
-                "ipv4Loopback": "10.6.0.104",
-                "routerMac": "48:0f:cf:ae:de:a8",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfae8e12": {
-            "segmentrouting": {
-                "name": "device-480fcfae8e12",
-                "ipv4NodeSid": 102,
-                "ipv4Loopback": "10.6.0.101",
-                "routerMac": "48:0f:cf:ae:8e:12",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfaeeeac": {
-            "segmentrouting": {
-                "name": "device-480fcfaeeeac",
-                "ipv4NodeSid": 103,
-                "ipv4Loopback": "10.6.0.103",
-                "routerMac": "48:0f:cf:ae:ee:ac",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        }
-    },
-    "ports": {
-        "of:0000480fcfaedea8/2": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.2.254/24" ],
-                    "vlan-untagged" : 2
-                }
-            ]
-        },
-        "of:0000480fcfaedea8/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.2.254/24" ],
-                    "vlan-untagged" : 2
-                }
-            ]
-        },
-        "of:0000480fcfaeeeac/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.1.254/24" ],
-                    "vlan-untagged" : 1
-                }
-            ]
-        }
-    },
-    "apps" : {
-        "org.onosproject.segmentrouting" : {
-            "segmentrouting" : {
-                "vRouterMacs" : [ "a4:23:05:06:01:01" ]
-            },
-            "xconnect": {
-              "of:0000480fcfaedea8": [{
-                "vlan": 333,
-                "ports": [1, 2],
-                "name": "vsg-1"
-                },
-                {
-                "vlan": 555,
-                "ports": [1, 2],
-                "name": "vsg-2"},
-                {
-                "vlan": 666,
-                "ports": [1, 2],
-                "name": "vsg-3"}]
-           }
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/test/setup/cord-test.py b/src/test/setup/cord-test.py
deleted file mode 100755
index e7f3f3a..0000000
--- a/src/test/setup/cord-test.py
+++ /dev/null
@@ -1,1752 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from argparse import ArgumentParser
-import os,sys,time,socket,errno
-import shutil, platform, re
-utils_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../utils')
-cli_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../cli')
-sys.path.append(utils_dir)
-sys.path.append(cli_dir)
-sys.path.insert(1, '/usr/local/lib/python2.7/dist-packages')
-from CordTestUtils import get_mac, getstatusoutput
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnosFlowCtrl import OnosFlowCtrl
-from threadPool import ThreadPool
-from CordContainer import *
-from CordTestServer import cord_test_server_start,cord_test_server_stop,cord_test_server_shutdown,CORD_TEST_HOST,CORD_TEST_PORT
-from TestManifest import TestManifest
-from VolthaCtrl import VolthaService
-from EapolAAA import get_radius_macs, get_radius_networks, radius_restore_users, RADIUS_USER_MAC_START, RADIUS_USER_MAC_END
-
-try:
-    from docker import APIClient as Client
-except:
-    from docker import Client
-from docker.utils import kwargs_from_env
-from Xos import XosServiceProfile
-try:
-    from Fabric import FabricMAAS
-except:
-    FabricMAAS = None
-
-class CordTester(Container):
-    sandbox = '/root/test'
-    sandbox_setup = '/root/test/src/test/setup'
-    tester_base = os.path.dirname(os.path.realpath(__file__))
-    tester_paths = os.path.realpath(__file__).split(os.path.sep)
-    tester_path_index = tester_paths.index('src') - 1
-    sandbox_host = os.path.sep.join(tester_paths[:tester_path_index+1])
-
-    host_guest_map = ( (sandbox_host, sandbox),
-                       ('/lib/modules', '/lib/modules'),
-                       ('/var/run/docker.sock', '/var/run/docker.sock')
-                       )
-    basename = 'cord-tester'
-    switch_on_olt = False
-    IMAGE = 'cordtest/nose'
-    ALL_TESTS = ('tls', 'dhcp', 'dhcprelay','igmp', 'subscriber',
-    'cordSubscriber', 'vrouter', 'flows', 'proxyarp', 'acl', 'xos', 'fabric',
-    'cbench', 'cluster', 'netCondition', 'cordvtn', 'iperf', 'mini', 'vsg')
-
-    dhcp_data_dir = os.path.join(tester_base, '..', 'setup')
-    default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
-    default_options = [ ('subnet-mask', '255.255.255.0'),
-                     ('broadcast-address', '192.168.1.255'),
-                     ('domain-name-servers', '192.168.1.1'),
-                     ('domain-name', '"mydomain.cord-tester"'),
-                   ]
-    default_subnet_config = [ ('192.168.1.2',
-'''
-subnet 192.168.1.0 netmask 255.255.255.0 {
-    range 192.168.1.10 192.168.1.100;
-}
-'''), ]
-    host_ip_map = {}
-    relay_interfaces_last = ()
-    interface_to_mac_map = {}
-    configs = {}
-
-
-    def __init__(self, tests, instance = 0, num_instances = 1, ctlr_ip = None,
-                 name = '', image = IMAGE, prefix = '', tag = 'candidate',
-                 env = None, rm = False, update = False, network = None, radius = None):
-        self.tests = tests
-        self.ctlr_ip = ctlr_ip
-        #Uncomment to enable configuring radius server ports on ovs bridge
-        self.radius = radius
-        self.rm = rm
-        self.name = name or self.get_name(num_instances)
-        super(CordTester, self).__init__(self.name, image = image, prefix = prefix, tag = tag)
-        host_config = self.create_host_config(host_guest_map = self.host_guest_map, privileged = True)
-        volumes = []
-        for _, g in self.host_guest_map:
-            volumes.append(g)
-        if update is True or not self.img_exists():
-            self.build_image(self.image_name)
-        self.create = True
-        #check if are trying to run tests on existing container
-        if not self.exists():
-            ##Remove test container if any
-            self.remove_container(self.name, force=True)
-        else:
-            self.create = False
-            self.rm = False
-        self.olt = False
-        self.switch_started = False
-        olt_config_file = 'olt_config.json'
-        if env is not None:
-            if env.has_key('OLT_CONFIG'):
-                self.olt = True
-            if env.has_key('OLT_CONFIG_FILE'):
-                olt_config_file = os.path.basename(env['OLT_CONFIG_FILE'])
-        olt_conf_file = os.path.join(self.tester_base, olt_config_file)
-        olt_config = OltConfig(olt_conf_file)
-        self.olt_conf_file = olt_conf_file
-        self.port_map, _ = olt_config.olt_port_map()
-        self.vcpes = olt_config.get_vcpes()
-        #Try using the host interface in olt conf to setup the switch
-        self.switches = self.port_map['switches']
-        voltha_network = VolthaService.get_network('voltha')
-        voltha_rest_ip = VolthaService.get_ip('chameleon')
-        if env is not None:
-            env['TEST_SWITCH'] = self.switches[0]
-            env['TEST_SWITCHES'] = ','.join(self.switches)
-            env['TEST_HOST'] = self.name
-            env['TEST_INSTANCE'] = instance
-            env['TEST_INSTANCES'] = num_instances
-            if voltha_rest_ip:
-                env['VOLTHA_HOST'] = voltha_rest_ip
-        if self.create:
-            print('Starting test container %s, image %s, tag %s' %(self.name, self.image, self.tag))
-            self.start(rm = False, volumes = volumes, environment = env,
-                       host_config = host_config, tty = True)
-            if network is not None:
-                Container.connect_to_network(self.name, network)
-            if voltha_network:
-                print('Connecting container to VOLTHA container network %s' %(voltha_network))
-                Container.connect_to_network(self.name, voltha_network)
-
-    def execute_switch(self, cmd, shell = False):
-        if self.olt:
-            return os.system(cmd)
-        return self.execute(cmd, shell = shell)
-
-    def test_flow(self, switch):
-        if not self.olt:
-            return False
-        egress = 1
-        ingress = 2
-        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
-        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
-        device_id = 'of:{}'.format(get_mac(switch))
-        ctlr = self.ctlr_ip.split(',')[0]
-        flow = OnosFlowCtrl(deviceId = device_id,
-                            egressPort = egress,
-                            ingressPort = ingress,
-                            ethType = '0x800',
-                            ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
-                            ipDst = ('IPV4_DST', egress_map['ip']+'/32'),
-                            controller = ctlr
-                            )
-        result = flow.addFlow()
-        if result != True:
-            return result
-        time.sleep(1)
-        #find and remove the flow
-        flow_id = flow.findFlow(device_id, IN_PORT = ('port', ingress),
-                                ETH_TYPE = ('ethType','0x800'), IPV4_SRC = ('ip', ingress_map['ip']+'/32'),
-                                IPV4_DST = ('ip', egress_map['ip']+'/32'))
-        result = False
-        if flow_id:
-            result = True
-            flow.removeFlow(device_id, flow_id)
-        return result
-
-    def ctlr_switch_availability(self, switch):
-        '''Test Add and verify flows with IPv4 selectors'''
-        if not self.olt:
-            return False
-        device_id = 'of:{}'.format(get_mac(switch))
-        ctlr = self.ctlr_ip.split(',')[0]
-        devices = OnosCtrl.get_devices(controller = ctlr)
-        if devices:
-            device = filter(lambda d: d['id'] == device_id, devices)
-            if device:
-                return True
-        return False
-
-    def start_switch(self, manifest, boot_delay = 2):
-        """Start OVS"""
-        ##Determine if OVS has to be started locally or not
-        s_file,s_sandbox = ('of-bridge-local.sh',self.tester_base) if self.olt else ('of-bridge.sh',self.sandbox_setup)
-        ovs_cmd = os.path.join(s_sandbox, s_file)
-        switches = filter(lambda sw: sw.startswith('br-int'), self.switches)
-        if self.olt:
-            if CordTester.switch_on_olt is True:
-                return
-            CordTester.switch_on_olt = True
-            ovs_cmd += ' {} {}'.format(len(switches), self.ctlr_ip)
-            if manifest.voltha_enable and manifest.voltha_loc and Onos.ssl_key:
-                ovs_cmd += ' {}'.format(manifest.voltha_loc)
-            print('Starting OVS on the host with %d switches for controller: %s' %(len(switches), self.ctlr_ip))
-        else:
-            ovs_cmd += ' {}'.format(self.switches[0])
-            print('Starting OVS on test container %s for controller: %s' %(self.name, self.ctlr_ip))
-        self.execute_switch(ovs_cmd)
-        time.sleep(5)
-        ## Wait for the controller to see the switch
-        for switch in switches:
-            status = 1
-            tries = 0
-            result = self.ctlr_switch_availability(switch) and self.test_flow(switch)
-            if result == True:
-                status = 0
-            while status != 0 and tries < 500:
-                cmd = 'sudo ovs-ofctl dump-flows {0} | grep \"type=0x8942\"'.format(switch)
-                status = self.execute_switch(cmd, shell = True)
-                tries += 1
-                if status != 0 and tries > 100:
-                    if self.ctlr_switch_availability(switch):
-                        status = 0
-                if tries % 10 == 0:
-                    print('Waiting for test switch %s to be connected to ONOS controller ...' %switch)
-
-            if status != 0:
-                print('Test Switch %s not connected to ONOS container.'
-                      'Please remove ONOS container and restart the test' %switch)
-                if self.rm:
-                    self.kill()
-                sys.exit(1)
-            else:
-                print('Test Switch %s connected to ONOS container.' %switch)
-
-        if boot_delay:
-            time.sleep(boot_delay)
-
-        self.switch_started = True
-
-    def setup_dhcpd(self,  manifest, boot_delay = 5):
-        return False
-        if not self.olt or not manifest.start_switch:
-            return False
-        if self.service_running("/usr/sbin/dhcpd"):
-            print('DHCPD already running in container %s' %self.name)
-            return True
-        setup_for_relay = self.dhcp_relay_setup()
-        dhcp_start_status = self.dhcpd_start()
-        if setup_for_relay and dhcp_start_status:
-           return True
-        else:
-           return False
-
-    def dhcp_relay_setup(self):
-        ctlr = self.ctlr_ip.split(',')[0]
-        did = OnosCtrl.get_device_id(controller = ctlr, mfr = 'Nicira', olt_conf_file = self.olt_conf_file)
-        self.relay_device_id = did
-        if self.port_map:
-            ##get the relay port for the OVS switch
-            relay_port = None
-            for host_intf, ports in self.port_map['switch_relay_port_list']:
-                if host_intf.startswith('br-int'):
-                    relay_port = ports[0]
-                    break
-
-            ##Per subscriber, we use 1 relay port
-            try:
-                relay_port = self.port_map[relay_port]
-            except:
-                relay_port = self.port_map['uplink']
-            self.relay_interface_port = relay_port
-            self.relay_interfaces = (self.port_map[self.relay_interface_port],)
-        else:
-             print 'Setup dhcpd we must have port_map'
-             return False
-        if self.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in self.port_map['ports']:
-                port_num = self.port_map[port]
-                if port_num == self.port_map['uplink']:
-                    continue
-                ip = self.get_host_ip(port_num)
-                mac = self.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure dhcp server virtual interface on the same subnet as first client interface
-            relay_ip = self.get_host_ip(interface_list[0][0])
-            relay_mac = self.get_mac(self.port_map[self.relay_interface_port])
-            interface_list.append((self.relay_interface_port, relay_ip, relay_mac))
-            self.onos_interface_load(interface_list)
-        return True
-
-    def onos_load_config(self, config):
-        ctlr = self.ctlr_ip.split(',')[0]
-        status, code = OnosCtrl.config(config, controller = ctlr)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(3)
-
-    def onos_interface_load(self, interface_list):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(self.relay_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        self.onos_load_config(interface_dict)
-        self.configs['interface_config'] = interface_dict
-
-    def get_host_ip(self, port):
-        if self.host_ip_map.has_key(port):
-            return self.host_ip_map[port]
-        self.host_ip_map[port] = '192.168.100.{}'.format(port)
-        return self.host_ip_map[port]
-
-    def get_mac(self, iface):
-        if self.interface_to_mac_map.has_key(iface):
-            return self.interface_to_mac_map[iface]
-        cmd = 'docker exec %s ip link show %s | awk \'/ether/ {print $2}\'' %(self.name, iface)
-        st, mac = getstatusoutput(cmd)
-        assert st == 0, 'Cannot get MAC for interface {}'.format(iface)
-        mac = mac.strip()
-        self.interface_to_mac_map[iface] = mac
-        return mac
-
-    def service_running(self, pattern):
-        st, _ = getstatusoutput('docker exec {} pgrep -f "{}"'.format(self.name, pattern))
-        return True if st == 0 else False
-
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    def dhcpd_start(self, intf_list = None,
-                    config = default_config, options = default_options,
-                    subnet = default_subnet_config):
-        '''Start the dhcpd server by generating the conf file'''
-        if intf_list is None:
-            intf_list = self.relay_interfaces
-        ##stop dhcpd if already running
-        #self.dhcpd_stop()
-        dhcp_conf = self.dhcpd_conf_generate(config = config, options = options,
-                                             subnet = subnet)
-        ##first touch dhcpd.leases if it doesn't exist
-        lease_file = '{}/dhcpd.leases'.format(self.dhcp_data_dir)
-        if os.access(lease_file, os.F_OK) is False:
-            with open(lease_file, 'w') as fd: pass
-
-        conf_file = '{}/dhcpd.conf'.format(self.dhcp_data_dir)
-        with open(conf_file, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        #now configure the dhcpd interfaces for various subnets
-        index = 0
-        intf_info = []
-        for ip,_ in subnet:
-            intf = intf_list[index]
-            mac = self.get_mac(intf)
-            intf_info.append((ip, mac))
-            index += 1
-            cmd = 'ifconfig {} {}'.format(intf, ip)
-            status = self.execute(cmd, shell = True)
-
-        intf_str = ','.join(intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format('/root/test/src/test/setup/dhcpd.conf','/root/test/src/test/setup/dhcpd.leases', intf_str)
-        print('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        status = self.execute(dhcpd_cmd, shell = True)
-        if status > 255:
-           status = 1
-        else:
-           return False
-        time.sleep(3)
-        self.relay_interfaces_last = self.relay_interfaces
-        self.relay_interfaces = intf_list
-        #self.onos_dhcp_relay_load(*intf_info[0])
-        return True
-
-    def setup_vcpes(self, port_num = 0):
-        res = 0
-        for vcpe in self.vcpes:
-            port, s_tag, c_tag = vcpe['port'], vcpe['s_tag'], vcpe['c_tag']
-            if os.access('/sys/class/net/{}'.format(port), os.F_OK):
-                guest_port = 'vcpe{}'.format(port_num)
-                port_num += 1
-                print('Provisioning guest port %s for %s with host port: %s, s_tag: %d, c_tag: %d\n'
-                      %(guest_port, self.name, port, s_tag, c_tag))
-                cmd = 'pipework {} -i {} -l {} {} 0.0.0.0/24'.format(port, guest_port, guest_port, self.name)
-                res = os.system(cmd)
-                if res == 0:
-                    vlan_outer_port = '{}.{}'.format(guest_port, s_tag)
-                    vlan_inner_port = '{}.{}'.format(vlan_outer_port, c_tag)
-                    #configure the s_tag/c_tag interfaces inside the guest container
-                    cmds = ('ip link set {} up'.format(guest_port),
-                            'ip link add link {} name {} type vlan id {}'.format(guest_port,
-                                                                                 vlan_outer_port,
-                                                                                 s_tag),
-                            'ip link set {} up'.format(vlan_outer_port),
-                            'ip link add link {} name {} type vlan id {}'.format(vlan_outer_port,
-                                                                                 vlan_inner_port,
-                                                                                 c_tag),
-                            'ip link set {} up'.format(vlan_inner_port),
-                            )
-                    res += self.execute(cmds, shell = True)
-
-    @classmethod
-    def cleanup_vcpes(cls, vcpes):
-        port_num = 0
-        for vcpe in vcpes:
-            port = vcpe['port']
-            if os.access('/sys/class/net/{}'.format(port), os.F_OK):
-                local_port = 'vcpe{}'.format(port_num)
-                cmd = 'ip link del {}'.format(local_port)
-                os.system(cmd)
-                port_num += 1
-
-    def setup_intfs(self, port_num = 0):
-        tester_intf_subnet = '192.168.100'
-        res = 0
-        switches = self.port_map['switches']
-        start_vlan = self.port_map['start_vlan']
-        ponsim = self.port_map['ponsim']
-        start_vlan += port_num
-        uplink = self.port_map['uplink']
-        wan = self.port_map['wan']
-        if ponsim is True:
-            if not wan:
-                wan = 'ponmgmt'
-        vcpe_port_num = port_num
-        port_list = self.port_map['switch_port_list'] + self.port_map['switch_relay_port_list']
-        subscribers = self.port_map['num_ports'] * len(self.port_map['switch_port_list'])
-        subscriber_macs = get_radius_macs(subscribers,
-                                          start = RADIUS_USER_MAC_START,
-                                          end = RADIUS_USER_MAC_END)
-        print('Provisioning the ports for the test container\n')
-        for host_intf, ports in port_list:
-            if self.switch_started is False and host_intf.startswith('br-int'):
-                continue
-            setup_ponsim = ponsim
-            host_index = 0
-            host_intf_base = 'pon1'
-            #if the host interface/switch does not exist, just create a dummy ovs switch
-            #needed if we are running with no-switch option
-            if not os.access('/sys/class/net/{}'.format(host_intf), os.F_OK):
-                os.system('ovs-vsctl add-br {}'.format(host_intf))
-            uplink = self.port_map[host_intf]['uplink']
-            if setup_ponsim is True:
-                if host_intf.find('_') < 0:
-                    print('Invalid host interface specified with ponsim. Disabling ponsim setup')
-                    setup_ponsim = False
-                else:
-                    try:
-                        host_index = int(host_intf.split('_')[-1])
-                        host_intf_base = host_intf.split('_')[0]
-                    except:
-                        print('Invalid host interface with ponsim. Disabling ponsim setup')
-                        setup_ponsim = False
-            for port in ports:
-                guest_if = port
-                local_if = 'l{}'.format(port_num+1) #port #'{0}_{1}'.format(guest_if, port_num+1)
-                guest_ip = '{0}.{1}/24'.format(tester_intf_subnet, port_num+1)
-                if setup_ponsim is True:
-                    if port != self.port_map[uplink]:
-                        host_intf = '{}_{}'.format(host_intf_base, host_index)
-                        host_index += 1
-
-                port_mac = ''
-                if len(subscriber_macs) > 0:
-                    port_mac = subscriber_macs.pop(0)
-
-                ##Use pipeworks to configure container interfaces on host/bridge interfaces
-                pipework_cmd = 'pipework {0} -i {1} -l {2} {3} {4} {5}'.format(host_intf, guest_if,
-                                                                               local_if, self.name, guest_ip,
-                                                                               port_mac)
-                #if the wan interface is specified for uplink, then use it instead
-                if wan and port == self.port_map[uplink]:
-                    pipework_cmd = 'pipework {0} -i {1} -l {2} {3} {4} {5}'.format(wan, guest_if,
-                                                                                   local_if, self.name, guest_ip,
-                                                                                   port_mac)
-                else:
-                    if start_vlan != 0:
-                        pipework_cmd += ' @{}'.format(start_vlan)
-                        start_vlan += 1
-                print('Running PIPEWORK cmd: %s' %pipework_cmd)
-                res += os.system(pipework_cmd)
-                port_num += 1
-
-            if setup_ponsim is True:
-                ponsim = False
-                wan = None
-
-        if self.switch_started and self.radius:
-            radius_macs = get_radius_macs(len(self.port_map['radius_ports']))
-            radius_intf_index = 0
-            radius_networks = get_radius_networks(len(self.port_map['switch_radius_port_list']))
-            index = 0
-            for host_intf, ports in self.port_map['switch_radius_port_list']:
-                prefix, subnet, gw = radius_networks[index]
-                mask = subnet.split('/')[-1]
-                #configure the host interface as the gateway
-                cmds = ( #'ip addr add {}/{} dev {}'.format(gw, mask, host_intf),
-                        'ip link set {} up'.format(host_intf),
-                )
-                print('Configuring host interface %s for radius server' %(host_intf))
-                for cmd in cmds:
-                    print('Running command: %s' %(cmd))
-                    res += os.system(cmd)
-                index += 1
-                for port in ports:
-                    guest_if = 'eth{}'.format(radius_intf_index+2)
-                    port_index = self.port_map[port]
-                    local_if = 'r{}'.format(port_index)
-                    guest_ip = '{}.{}/{}'.format(prefix, port_index, mask)
-                    mac = radius_macs[radius_intf_index]
-                    radius_intf_index += 1
-                    port_num += 1
-                    pipework_cmd = 'pipework {0} -i {1} -l {2} {3} {4} {5}'.format(host_intf, guest_if,
-                                                                                   local_if, self.radius.name,
-                                                                                   guest_ip, mac)
-                    print('Configuring Radius port %s on OVS bridge %s' %(guest_if, host_intf))
-                    print('Running pipework command: %s' %(pipework_cmd))
-                    res += os.system(pipework_cmd)
-                    rp_filter_disable = 'docker exec {} '\
-                                        'sysctl -w net.ipv4.conf.{}.rp_filter=2'.format(self.radius.name,
-                                                                                        guest_if)
-                    print('Disabling rp filter on radius interface %s' %(guest_if))
-                    res += os.system(rp_filter_disable)
-                    brd = '{}.255'.format(prefix)
-                    brd_cmd = 'docker exec {} ifconfig {} broadcast {} up'.format(self.radius.name,
-                                                                                  guest_if,
-                                                                                  brd)
-                    print('Setting broadcast address to %s for radius interface %s' %(brd, guest_if))
-                    res += os.system(brd_cmd)
-
-        self.setup_vcpes(vcpe_port_num)
-        return res, port_num
-
-    @classmethod
-    def get_intf_type(cls, intf):
-        intf_type = 0
-        if os.path.isdir('/sys/class/net/{}/bridge'.format(intf)):
-            intf_type = 1 ##linux bridge
-        else:
-            cmd = 'ovs-vsctl list-br | grep -q "^{0}$"'.format(intf)
-            res = os.system(cmd)
-            if res == 0: ##ovs bridge
-                intf_type = 2
-
-        return intf_type
-
-    @classmethod
-    def cleanup_intfs(cls, olt_conf_file):
-        if not os.access(olt_conf_file, os.F_OK):
-            olt_conf_file = os.path.join(cls.tester_base, os.path.basename(olt_conf_file))
-        olt_config = OltConfig(olt_conf_file)
-        port_map, _ = olt_config.olt_port_map()
-        vcpes = olt_config.get_vcpes()
-        port_num = 0
-        start_vlan = port_map['start_vlan']
-        wan = port_map['wan']
-        res = 0
-        port_list = port_map['switch_port_list'] + port_map['switch_relay_port_list']
-        for intf_host, ports in port_list:
-            intf_type = cls.get_intf_type(intf_host)
-            for port in ports:
-                cmds = ()
-                local_if = 'l{}'.format(port_num+1) #port #'{0}_{1}'.format(port, port_num+1)
-                if intf_type == 0:
-                    if start_vlan != 0:
-                        cmds = ('ip link del {}.{}'.format(intf_host, start_vlan),)
-                        start_vlan += 1
-                else:
-                    if intf_type == 1:
-                        cmds = ('brctl delif {} {}'.format(intf_host, local_if),
-                                'ip link del {}'.format(local_if))
-                    else:
-                        cmds = ('ovs-vsctl del-port {} {}'.format(intf_host, local_if),
-                                'ip link del {}'.format(local_if))
-
-                for cmd in cmds:
-                    res += os.system(cmd)
-                port_num += 1
-
-        for intf_host, ports in port_map['switch_radius_port_list']:
-            intf_type = cls.get_intf_type(intf_host)
-            for port in ports:
-                port_index = port_map[port]
-                local_if = 'r{}'.format(port_index)
-                cmds = ()
-                if intf_type == 2:
-                    cmds = ('ovs-vsctl del-port {} {}'.format(intf_host, local_if),
-                            'ip link del {}'.format(local_if))
-
-                for cmd in cmds:
-                    res += os.system(cmd)
-
-        cls.cleanup_vcpes(vcpes)
-
-    @classmethod
-    def get_name(cls, num_instances):
-        cnt_name = '/{0}'.format(cls.basename)
-        cnt_name_len = len(cnt_name)
-        names = list(flatten(n['Names'] for n in cls.dckr.containers(all=True)))
-        test_names = filter(lambda n: n.startswith(cnt_name), names)
-        last_cnt_number = 0
-        if test_names:
-            last_cnt_name = reduce(lambda n1, n2: n1 if int(n1[cnt_name_len:]) > \
-                                       int(n2[cnt_name_len:]) else n2,
-                                   test_names)
-            last_cnt_number = int(last_cnt_name[cnt_name_len:])
-            if num_instances == 1:
-                last_cnt_number -= 1
-        test_cnt_name = cls.basename + str(last_cnt_number+1)
-        return test_cnt_name
-
-    @classmethod
-    def build_image(cls, image):
-        print('Building test container docker image %s' %image)
-        ovs_version = '2.5.0'
-        image_format = (ovs_version,)*4
-        dockerfile = '''
-FROM ubuntu:14.04
-MAINTAINER chetan@ciena.com
-RUN apt-get update  && \
-    apt-get install -y git git-core autoconf automake autotools-dev pkg-config \
-        make gcc g++ libtool libc6-dev cmake libpcap-dev libxerces-c2-dev  \
-        unzip libpcre3-dev flex bison libboost-dev \
-        python python-pip python-setuptools python-scapy tcpdump doxygen doxypy wget \
-        openvswitch-common openvswitch-switch \
-        python-twisted python-sqlite sqlite3 python-pexpect telnet arping isc-dhcp-server \
-        python-paramiko python-maas-client python-keystoneclient python-neutronclient \
-        python-glanceclient python-novaclient python-dev libffi-dev libssl-dev
-RUN easy_install nose
-RUN python -m pip install --upgrade pip
-RUN mkdir -p /root/ovs
-WORKDIR /root
-RUN wget http://openvswitch.org/releases/openvswitch-{}.tar.gz -O /root/ovs/openvswitch-{}.tar.gz && \
-(cd /root/ovs && tar zxpvf openvswitch-{}.tar.gz && \
- cd openvswitch-{} && \
- ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install)
-RUN service openvswitch-switch restart || /bin/true
-RUN pip install scapy==2.3.2 scapy-ssl_tls==1.2.2 monotonic configObj docker-py pyyaml nsenter pyroute2 netaddr python-daemon
-RUN pip install -U cryptography
-RUN pip install -U paramiko
-RUN mv /usr/sbin/tcpdump /sbin/
-RUN ln -sf /sbin/tcpdump /usr/sbin/tcpdump
-RUN mv /usr/sbin/dhcpd /sbin/
-RUN ln -sf /sbin/dhcpd /usr/sbin/dhcpd
-RUN mv /sbin/dhclient /usr/sbin/
-RUN ln -sf /usr/sbin/dhclient /sbin/dhclient
-WORKDIR /root
-RUN wget -nc http://de.archive.ubuntu.com/ubuntu/pool/main/b/bison/bison_2.5.dfsg-2.1_amd64.deb \
-         http://de.archive.ubuntu.com/ubuntu/pool/main/b/bison/libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN sudo dpkg -i bison_2.5.dfsg-2.1_amd64.deb libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN rm bison_2.5.dfsg-2.1_amd64.deb libbison-dev_2.5.dfsg-2.1_amd64.deb
-RUN wget -nc http://www.nbee.org/download/nbeesrc-jan-10-2013.zip && \
-    unzip nbeesrc-jan-10-2013.zip && \
-    cd nbeesrc-jan-10-2013/src && cmake . && make && \
-    cp ../bin/libn*.so /usr/local/lib && ldconfig && \
-    cp -R ../include/* /usr/include/
-WORKDIR /root
-RUN git clone https://github.com/CPqD/ofsoftswitch13.git && \
-    cd ofsoftswitch13 && \
-    ./boot.sh && \
-    ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && \
-    make && make install
-CMD ["/bin/bash"]
-'''.format(*image_format)
-        super(CordTester, cls).build_image(dockerfile, image)
-        print('Done building docker image %s' %image)
-
-    def run_tests(self):
-        '''Run the list of tests'''
-        res = 0
-        print('Modifying scapy tool files before running a test: %s' %self.tests)
-        self.modify_scapy_files_for_specific_tests()
-        print('Running tests: %s' %self.tests)
-        for t in self.tests:
-            test = t.split(':')[0]
-            test_file = '{}Test.py'.format(test)
-            if t.find(':') >= 0:
-                test_case = '{0}:{1}'.format(test_file, t.split(':')[1])
-            else:
-                test_case = test_file
-            cmd = 'nosetests -v {0}/src/test/{1}/{2}'.format(self.sandbox, test, test_case)
-            status = self.execute(cmd, shell = True)
-            if status > 255:
-                status = 1
-            res |= status
-            print('Test %s %s' %(test_case, 'Success' if status == 0 else 'Failure'))
-        print('Done running tests')
-        if self.rm:
-            print('Removing test container %s' %self.name)
-            self.kill(remove=True)
-
-        return res
-
-    def modify_scapy_files_for_specific_tests(self):
-        name = self.name
-        container_cmd_exec = Container(name = name, image = CordTester.IMAGE)
-        tty = False
-        dckr = Client()
-        cmd =  'cp test/src/test/scapy/fields.py /usr/local/lib/python2.7/dist-packages/scapy/fields.py '
-        i = container_cmd_exec.execute(cmd = cmd, tty= tty, stream = True)
-
-    @classmethod
-    def list_tests(cls, tests):
-        print('Listing test cases')
-        for test in tests:
-            test_file = '{}Test.py'.format(test)
-            cmd = 'nosetests -v --collect-only {0}/../{1}/{2}'.format(cls.tester_base, test, test_file)
-            os.system(cmd)
-
-
-##default onos/radius/test container images and names
-onos_image_default='onosproject/onos:latest'
-nose_image_default= '{}:candidate'.format(CordTester.IMAGE)
-test_type_default='dhcp'
-onos_app_version = '3.0-SNAPSHOT'
-cord_tester_base = os.path.dirname(os.path.realpath(__file__))
-olt_config_default = os.path.join(cord_tester_base, 'olt_config.json')
-onos_app_file = os.path.abspath('{0}/../apps/ciena-cordigmp-multitable-'.format(cord_tester_base) + onos_app_version + '.oar')
-cord_test_server_address = '{}:{}'.format(CORD_TEST_HOST, CORD_TEST_PORT)
-identity_file_default = '/etc/maas/ansible/id_rsa'
-onos_log_level = 'INFO'
-
-##sets up the ssh key file for the test container
-def set_ssh_key_file(identity_file):
-    ssh_key_file = None
-    if os.access(identity_file, os.F_OK):
-        ##copy it to setup directory
-        identity_dest = os.path.join(CordTester.tester_base, 'id_rsa')
-        if os.path.abspath(identity_file) != identity_dest:
-            try:
-                shutil.copy(identity_file, identity_dest)
-                ssh_key_file = os.path.join(CordTester.sandbox_setup, 'id_rsa')
-            except: pass
-
-    return ssh_key_file
-
-def openstack_setup(test_cnt_env):
-    admin_rc = os.path.join(os.getenv('HOME'), 'admin-openrc.sh')
-    if not os.access(admin_rc, os.F_OK):
-        admin_rc = os.path.join('/opt/cord_profile', 'admin-openrc.sh')
-    if os.access(admin_rc, os.F_OK):
-        dest = os.path.join(CordTester.tester_base, 'admin-openrc.sh')
-        shutil.copy(admin_rc, dest)
-        with open(dest, 'r') as f:
-            cfg = {}
-            for data in f.read().splitlines():
-                try:
-                    k, v = data.split('=')
-                except:
-                    continue
-
-                k = k.split()[-1]
-                cfg[k] = v
-
-            if 'REQUESTS_CA_BUNDLE' in cfg:
-                #copy the certificate to setup directory
-                cert_src = cfg['REQUESTS_CA_BUNDLE']
-                shutil.copy(cert_src, CordTester.tester_base)
-                test_cert_loc = os.path.join(CordTester.sandbox_setup,
-                                             os.path.basename(cert_src))
-                cfg['REQUESTS_CA_BUNDLE'] = test_cert_loc
-
-            for key, value in cfg.iteritems():
-                test_cnt_env[key] = value
-
-def runTest(args):
-    #Start the cord test tcp server
-    test_manifest = TestManifest(args = args)
-    test_server_params = test_manifest.server.split(':')
-    test_host = test_server_params[0]
-    test_port = CORD_TEST_PORT
-    if len(test_server_params) > 1:
-        test_port = int(test_server_params[1])
-
-    test_containers = []
-    #These tests end up restarting ONOS/quagga/radius
-    tests_exempt = ('vrouter', 'cordSubscriber', 'proxyarp', 'dhcprelay')
-    if args.test_type.lower() == 'all':
-        tests = CordTester.ALL_TESTS
-        args.quagga = True
-    else:
-        tests = args.test_type.split('-')
-
-    tests_parallel = [ t for t in tests if t.split(':')[0] not in tests_exempt ]
-    tests_not_parallel = [ t for t in tests if t.split(':')[0] in tests_exempt ]
-    onos_cnt = {'tag':'latest'}
-    nose_cnt = {'image': CordTester.IMAGE, 'tag': 'candidate'}
-    update_map = { 'quagga' : False, 'test' : False, 'radius' : False }
-    update_map[args.update.lower()] = True
-
-    if args.update.lower() == 'all':
-       for c in update_map.keys():
-           update_map[c] = True
-
-    use_manifest = False
-    if args.manifest:
-        if os.access(args.manifest, os.F_OK):
-            ##copy it to setup directory
-            dest = os.path.join(CordTester.tester_base,
-                                os.path.basename(args.manifest))
-            if os.path.abspath(args.manifest) != dest:
-                try:
-                    shutil.copy(args.manifest, dest)
-                except: pass
-            test_manifest = TestManifest(manifest = dest)
-            use_manifest = True
-        else:
-            print('Unable to access test manifest: %s' %args.manifest)
-
-    onos_ip = test_manifest.onos_ip
-    radius_ip = test_manifest.radius_ip
-    radius = None
-    head_node = test_manifest.head_node
-    iterations = test_manifest.iterations
-    onos_cord_loc = test_manifest.onos_cord
-    service_profile = test_manifest.service_profile
-    synchronizer = test_manifest.synchronizer
-    olt_config_file = test_manifest.olt_config
-    voltha_loc = test_manifest.voltha_loc
-    voltha_intf = test_manifest.voltha_intf
-    if not os.access(olt_config_file, os.F_OK):
-        olt_config_file = os.path.join(CordTester.tester_base, 'olt_config.json')
-    else:
-        dest = os.path.join(CordTester.tester_base,
-                            os.path.basename(olt_config_file))
-        if os.path.abspath(olt_config_file) != dest:
-            try:
-                shutil.copy(olt_config_file, dest)
-            except: pass
-
-    onos_cord = None
-    Onos.update_data_dir(test_manifest.karaf_version)
-    Onos.set_expose_port(test_manifest.expose_port)
-    Radius.create_network()
-    if onos_cord_loc:
-        if onos_cord_loc.find(os.path.sep) < 0:
-            onos_cord_loc = os.path.join(os.getenv('HOME'), onos_cord_loc)
-        if not os.access(onos_cord_loc, os.F_OK):
-            print('ONOS cord config location %s is not accessible' %onos_cord_loc)
-            sys.exit(1)
-        if not onos_ip:
-            ##Unexpected case. Specify the external controller ip when running on cord node
-            print('Specify ONOS ip using \"-e\" option when running the cord-tester on cord node')
-            sys.exit(1)
-        if not service_profile:
-            print('Specify service profile for the ONOS cord instance. Eg: rcord')
-            sys.exit(1)
-        if not synchronizer:
-            print('Specify synchronizer to use for the ONOS cord instance. Eg: vtn, fabric, cord')
-            sys.exit(1)
-        onos_cord = OnosCord(onos_ip, onos_cord_loc, service_profile, synchronizer, skip = test_manifest.skip_onos_restart)
-
-    try:
-        test_server = cord_test_server_start(daemonize = False, cord_test_host = test_host, cord_test_port = test_port,
-                                             onos_cord = onos_cord)
-    except:
-        ##Most likely a server instance is already running (daemonized earlier)
-        test_server = None
-
-    Container.IMAGE_PREFIX = test_manifest.image_prefix
-    Onos.MAX_INSTANCES = test_manifest.onos_instances
-    Onos.JVM_HEAP_SIZE = test_manifest.jvm_heap_size
-    cluster_mode = True if test_manifest.onos_instances > 1 else False
-    async_mode = cluster_mode and test_manifest.async_mode
-    existing_list = [ c['Names'][0][1:] for c in Container.dckr.containers() if c['Image'] == test_manifest.onos_image ]
-    setup_cluster = False if len(existing_list) == test_manifest.onos_instances else True
-    onos_ips = []
-    if cluster_mode is True and len(existing_list) > 1:
-        ##don't setup cluster config again
-        cluster_mode = False
-    if voltha_loc:
-        voltha_key = os.path.join(voltha_loc, 'docker', 'onos_cfg', 'onos.jks')
-        Onos.update_ssl_key(voltha_key)
-    if onos_ip is None:
-        image_names = test_manifest.onos_image.rsplit(':', 1)
-        onos_cnt['image'] = image_names[0]
-        if len(image_names) > 1:
-            if image_names[1].find('/') < 0:
-                onos_cnt['tag'] = image_names[1]
-            else:
-                #tag cannot have slashes
-                onos_cnt['image'] = test_manifest.onos_image
-
-        Onos.IMAGE = onos_cnt['image']
-        Onos.PREFIX = test_manifest.image_prefix
-        Onos.TAG = onos_cnt['tag']
-        data_volume = '{}-data'.format(Onos.NAME) if test_manifest.shared_volume else None
-        onos = Onos(image = Onos.IMAGE,
-                    tag = Onos.TAG, boot_delay = 60, cluster = cluster_mode,
-                    data_volume = data_volume, async = async_mode, network = test_manifest.docker_network)
-        if onos.running:
-            onos_ips.append(onos.ipaddr)
-    else:
-        onos_ips.append(onos_ip)
-
-    num_onos_instances = test_manifest.onos_instances
-    if num_onos_instances > 1 and onos is not None:
-        onos_instances = []
-        onos_instances.append(onos)
-        for i in range(1, num_onos_instances):
-            name = '{}-{}'.format(Onos.NAME, i+1)
-            data_volume = '{}-data'.format(name) if test_manifest.shared_volume else None
-            quagga_config = Onos.get_quagga_config(i)
-            onos = Onos(name = name, image = Onos.IMAGE, tag = Onos.TAG, boot_delay = 60, cluster = cluster_mode,
-                        data_volume = data_volume, async = async_mode,
-                        quagga_config = quagga_config, network = test_manifest.docker_network, instance = i)
-            onos_instances.append(onos)
-            if onos.running:
-                onos_ips.append(onos.ipaddr)
-        if async_mode is True and cluster_mode is True:
-            Onos.start_cluster_async(onos_instances)
-        if not onos_ips:
-            for onos in onos_instances:
-                onos_ips.append(onos.ipaddr)
-        if cluster_mode is True:
-            try:
-                for ip in onos_ips:
-                    print('Installing cord tester ONOS app %s in ONOS instance %s' %(args.app,ip))
-                    OnosCtrl.install_app(args.app, onos_ip = ip)
-            except: pass
-        if setup_cluster is True:
-            Onos.setup_cluster(onos_instances)
-        else:
-            print('ONOS instances already running. Skipping ONOS form cluster for %d instances' %num_onos_instances)
-    ctlr_addr = ','.join(onos_ips)
-
-    print('Controller IP %s, Test type %s' %(onos_ips, args.test_type))
-    if onos_ip is not None:
-        print('Installing ONOS cord apps')
-        try:
-            Onos.install_cord_apps(onos_ip = onos_ip)
-        except: pass
-
-    if not cluster_mode:
-        print('Installing cord tester ONOS app %s' %args.app)
-        try:
-	    for ip in onos_ips:
-                OnosCtrl.install_app(args.app, onos_ip = ip)
-        except: pass
-
-    if voltha_loc:
-        #start voltha
-        voltha = VolthaService(voltha_loc, onos_ips[0], interface = voltha_intf,
-                               olt_config = olt_config_file, container_mode = test_manifest.voltha_container_mode)
-        voltha.start()
-
-    if radius_ip is None:
-        ##Create Radius container
-        radius = Radius(prefix = Container.IMAGE_PREFIX, update = update_map['radius'],
-                        network = test_manifest.docker_network,
-                        olt_config = olt_config_file)
-        radius_ip = radius.ip(network = Radius.NETWORK)
-
-    print('Radius server running with IP %s' %radius_ip)
-
-    if args.quagga == True:
-        #Start quagga. Builds container if required
-        quagga = Quagga(prefix = Container.IMAGE_PREFIX, update = update_map['quagga'],
-                        network = test_manifest.docker_network)
-
-    try:
-        maas_api_key = FabricMAAS.get_api_key()
-    except:
-        maas_api_key = 'UNKNOWN'
-
-    ssh_key_file = set_ssh_key_file(args.identity_file)
-    test_cnt_env = { 'ONOS_CONTROLLER_IP' : ctlr_addr,
-                     'ONOS_AAA_IP' : radius_ip if radius_ip is not None else '',
-                     'QUAGGA_IP': test_host,
-                     'CORD_TEST_HOST' : test_host,
-                     'CORD_TEST_PORT' : test_port,
-                     'ONOS_RESTART' : 0 if test_manifest.olt and args.test_controller else 1,
-                     'LOG_LEVEL': test_manifest.log_level,
-                     'HEAD_NODE': head_node if head_node else CORD_TEST_HOST,
-                     'MAAS_API_KEY': maas_api_key,
-                     'KARAF_VERSION' : test_manifest.karaf_version,
-                     'VOLTHA_ENABLED' : int(test_manifest.voltha_enable)
-                   }
-
-    if ssh_key_file:
-        test_cnt_env['SSH_KEY_FILE'] = ssh_key_file
-
-    olt_conf_test_loc = os.path.join(CordTester.sandbox_setup, os.path.basename(olt_config_file))
-    test_cnt_env['OLT_CONFIG_FILE'] = olt_conf_test_loc
-    if test_manifest.olt:
-        test_cnt_env['OLT_CONFIG'] = olt_conf_test_loc
-
-    if use_manifest:
-        test_cnt_env['MANIFEST'] = os.path.join(CordTester.sandbox_setup,
-                                                os.path.basename(args.manifest))
-
-
-    if iterations is not None:
-        test_cnt_env['ITERATIONS'] = iterations
-
-    openstack_setup(test_cnt_env)
-
-    if args.num_containers > 1 and args.container:
-        print('Cannot specify number of containers with container option')
-        sys.exit(1)
-    if args.container:
-        args.keep = True
-    port_num = 0
-    num_tests = len(tests_parallel)
-    if num_tests > 0 and num_tests < args.num_containers:
-        tests_parallel *= args.num_containers/num_tests
-        num_tests = len(tests_parallel)
-    tests_per_container = max(1, num_tests/args.num_containers)
-    last_batch = num_tests % args.num_containers
-    test_slice_start = 0
-    test_slice_end = test_slice_start + tests_per_container
-    num_test_containers = min(num_tests, args.num_containers)
-    if tests_parallel:
-        print('Running %s tests across %d containers in parallel' %(tests_parallel, num_test_containers))
-    for container in xrange(num_test_containers):
-        if container + 1 == num_test_containers:
-            test_slice_end += last_batch
-        test_cnt = CordTester(tests_parallel[test_slice_start:test_slice_end],
-                              instance = container, num_instances = num_test_containers,
-                              ctlr_ip = ctlr_addr,
-                              name = args.container,
-                              image = nose_cnt['image'],
-                              prefix = Container.IMAGE_PREFIX,
-                              tag = nose_cnt['tag'],
-                              env = test_cnt_env,
-                              rm = False if args.keep else True,
-                              update = update_map['test'],
-                              network = test_manifest.docker_network,
-                              radius = radius)
-        test_slice_start = test_slice_end
-        test_slice_end = test_slice_start + tests_per_container
-        update_map['test'] = False
-        test_containers.append(test_cnt)
-        if not test_cnt.create:
-            continue
-        if test_cnt.create and (test_manifest.start_switch or not test_manifest.olt):
-            if not args.no_switch:
-                test_cnt.start_switch(test_manifest)
-        if test_cnt.create and test_cnt.olt:
-            _, port_num = test_cnt.setup_intfs(port_num = port_num)
-
-    status = 0
-    if len(test_containers) > 1:
-        thread_pool = ThreadPool(len(test_containers), queue_size = 1, wait_timeout=1)
-        for test_cnt in test_containers:
-            thread_pool.addTask(test_cnt.run_tests)
-        thread_pool.cleanUpThreads()
-    else:
-        if test_containers:
-            status = test_containers[0].run_tests()
-
-    ##Run the linear tests
-    if tests_not_parallel:
-        test_cnt = CordTester(tests_not_parallel,
-                              ctlr_ip = ctlr_addr,
-                              name = args.container,
-                              image = nose_cnt['image'],
-                              prefix = Container.IMAGE_PREFIX,
-                              tag = nose_cnt['tag'],
-                              env = test_cnt_env,
-                              rm = False if args.keep else True,
-                              update = update_map['test'],
-                              network = test_manifest.docker_network)
-        if test_cnt.create and (test_manifest.start_switch or not test_manifest.olt):
-            #For non parallel tests, we just restart the switch also for OLT's
-            CordTester.switch_on_olt = False
-            if not args.no_switch:
-                test_cnt.start_switch(test_manifest)
-        if test_cnt.create and test_cnt.olt:
-            test_cnt.setup_intfs(port_num = port_num)
-        test_cnt.run_tests()
-
-    if test_server:
-        if onos_cord:
-            onos_cord.restore()
-        cord_test_server_stop(test_server)
-
-    return status
-
-##Starts onos/radius/quagga containers as appropriate
-def setupCordTester(args):
-    onos_cnt = {'tag':'latest'}
-    nose_cnt = {'image': CordTester.IMAGE, 'tag': 'candidate'}
-    update_map = { 'quagga' : False, 'radius' : False, 'test': False }
-    update_map[args.update.lower()] = True
-    test_manifest = TestManifest(args = args)
-
-    if args.update.lower() == 'all':
-       for c in update_map.keys():
-           update_map[c] = True
-
-    use_manifest = False
-    if args.manifest:
-        if os.access(args.manifest, os.F_OK):
-            ##copy it to setup directory
-            dest = os.path.join(CordTester.tester_base,
-                                os.path.basename(args.manifest))
-            if os.path.abspath(args.manifest) != dest:
-                try:
-                    shutil.copy(args.manifest, dest)
-                except: pass
-            test_manifest = TestManifest(manifest = dest)
-            use_manifest = True
-
-    onos_ip = test_manifest.onos_ip
-    radius_ip = test_manifest.radius_ip
-    radius = None
-    head_node = test_manifest.head_node
-    iterations = test_manifest.iterations
-    service_profile = test_manifest.service_profile
-    synchronizer = test_manifest.synchronizer
-    voltha_loc = test_manifest.voltha_loc
-    voltha_intf = test_manifest.voltha_intf
-    onos_cord = None
-    onos_cord_loc = test_manifest.onos_cord
-    Onos.update_data_dir(test_manifest.karaf_version)
-    Onos.set_expose_port(test_manifest.expose_port)
-    olt_config_file = test_manifest.olt_config
-    if not os.access(olt_config_file, os.F_OK):
-        olt_config_file = os.path.join(CordTester.tester_base, 'olt_config.json')
-    else:
-        dest = os.path.join(CordTester.tester_base,
-                            os.path.basename(olt_config_file))
-        if os.path.abspath(olt_config_file) != dest:
-            try:
-                shutil.copy(olt_config_file, dest)
-            except: pass
-
-    Radius.create_network()
-    if onos_cord_loc:
-        if onos_cord_loc.find(os.path.sep) < 0:
-            onos_cord_loc = os.path.join(os.getenv('HOME'), onos_cord_loc)
-        if not os.access(onos_cord_loc, os.F_OK):
-            print('ONOS cord config location %s is not accessible' %onos_cord_loc)
-            sys.exit(1)
-        if not onos_ip:
-            ##Unexpected case. Specify the external controller ip when running on cord node
-            print('Specify ONOS ip using \"-e\" option when running the cord-tester on cord node')
-            sys.exit(1)
-        if not service_profile:
-            print('Specify service profile for the ONOS cord instance. Eg: rcord')
-            sys.exit(1)
-        if not synchronizer:
-            print('Specify synchronizer to use for the ONOS cord instance. Eg: vtn, fabric, cord')
-            sys.exit(1)
-        onos_cord = OnosCord(onos_ip, onos_cord_loc, service_profile, synchronizer, skip = test_manifest.skip_onos_restart)
-
-    Container.IMAGE_PREFIX = test_manifest.image_prefix
-    #don't spawn onos if the user had started it externally
-    image_names = test_manifest.onos_image.rsplit(':', 1)
-    onos_cnt['image'] = image_names[0]
-    if len(image_names) > 1:
-        if image_names[1].find('/') < 0:
-            onos_cnt['tag'] = image_names[1]
-        else:
-            #tag cannot have slashes
-            onos_cnt['image'] = test_manifest.onos_image
-
-    Onos.IMAGE = onos_cnt['image']
-    Onos.PREFIX = test_manifest.image_prefix
-    Onos.TAG = onos_cnt['tag']
-    Onos.MAX_INSTANCES = test_manifest.onos_instances
-    Onos.JVM_HEAP_SIZE = test_manifest.jvm_heap_size
-    cluster_mode = True if test_manifest.onos_instances > 1 else False
-    async_mode = cluster_mode and test_manifest.async_mode
-    existing_list = [ c['Names'][0][1:] for c in Container.dckr.containers() if c['Image'] == test_manifest.onos_image ]
-    setup_cluster = False if len(existing_list) == test_manifest.onos_instances else True
-    #cleanup existing volumes before forming a new cluster
-    if setup_cluster is True:
-        print('Cleaning up existing cluster volumes')
-        data_dir = os.path.join(Onos.setup_dir, 'cord-onos*-data')
-        try:
-            os.system('rm -rf {}'.format(data_dir))
-        except: pass
-
-    onos = None
-    onos_ips = []
-    if voltha_loc:
-        voltha_key = os.path.join(voltha_loc, 'docker', 'onos_cfg', 'onos.jks')
-        Onos.update_ssl_key(voltha_key)
-    if onos_ip is None:
-        data_volume = '{}-data'.format(Onos.NAME) if test_manifest.shared_volume else None
-        onos = Onos(image = Onos.IMAGE, tag = Onos.TAG, boot_delay = 60, cluster = cluster_mode,
-                    data_volume = data_volume, async = async_mode, network = test_manifest.docker_network)
-        if onos.running:
-            onos_ips.append(onos.ipaddr)
-    else:
-        onos_ips.append(onos_ip)
-
-    num_onos_instances = test_manifest.onos_instances
-    if num_onos_instances > 1 and onos is not None:
-        onos_instances = []
-        onos_instances.append(onos)
-        for i in range(1, num_onos_instances):
-            name = '{}-{}'.format(Onos.NAME, i+1)
-            data_volume = '{}-data'.format(name) if test_manifest.shared_volume else None
-            quagga_config = Onos.get_quagga_config(i)
-            onos = Onos(name = name, image = Onos.IMAGE, tag = Onos.TAG, boot_delay = 60, cluster = cluster_mode,
-                        data_volume = data_volume, async = async_mode,
-                        quagga_config = quagga_config, network = test_manifest.docker_network, instance = i)
-            onos_instances.append(onos)
-            if onos.running:
-                onos_ips.append(onos.ipaddr)
-        if async_mode is True:
-            Onos.start_cluster_async(onos_instances)
-        if not onos_ips:
-            for onos in onos_instances:
-                onos_ips.append(onos.ipaddr)
-        if setup_cluster is True:
-            Onos.setup_cluster(onos_instances)
-
-    ctlr_addr = ','.join(onos_ips)
-    print('Onos IP %s' %ctlr_addr)
-    if not test_manifest.skip_onos_restart:
-        if onos_ip is not None:
-            print('Installing ONOS cord apps')
-            try:
-                Onos.install_cord_apps(onos_ip = onos_ip)
-            except: pass
-
-        print('Installing cord tester ONOS app %s' %args.app)
-        try:
-            for ip in onos_ips:
-                OnosCtrl.install_app(args.app, onos_ip = ip)
-        except: pass
-
-    if voltha_loc:
-        #start voltha
-        voltha = VolthaService(voltha_loc, onos_ips[0], interface = voltha_intf,
-                               olt_config = olt_config_file, container_mode = test_manifest.voltha_container_mode)
-        voltha.start()
-
-    ##Start Radius container if not started
-    if radius_ip is None:
-        radius = Radius(prefix = Container.IMAGE_PREFIX, update = update_map['radius'],
-                        network = test_manifest.docker_network,
-                        olt_config = olt_config_file)
-        radius_ip = radius.ip(network = Radius.NETWORK)
-
-    print('Radius server running with IP %s' %radius_ip)
-
-    if args.quagga == True:
-        #Start quagga. Builds container if required
-        quagga = Quagga(prefix = Container.IMAGE_PREFIX, update = update_map['quagga'],
-                        network = test_manifest.docker_network)
-        print('Quagga started')
-
-    params = test_manifest.server.split(':')
-    ip = params[0]
-    port = CORD_TEST_PORT
-    if len(params) > 1:
-        port = int(params[1])
-
-    try:
-        maas_api_key = FabricMAAS.get_api_key()
-    except:
-        maas_api_key = 'UNKNOWN'
-
-    ssh_key_file = set_ssh_key_file(args.identity_file)
-
-    #provision the test container
-    if not args.dont_provision:
-        test_cnt_env = { 'ONOS_CONTROLLER_IP' : ctlr_addr,
-                         'ONOS_AAA_IP' : radius_ip if radius_ip is not None else '',
-                         'QUAGGA_IP': ip,
-                         'CORD_TEST_HOST' : ip,
-                         'CORD_TEST_PORT' : port,
-                         'ONOS_RESTART' : 0 if test_manifest.olt and args.test_controller else 1,
-                         'LOG_LEVEL': test_manifest.log_level,
-                         'HEAD_NODE': head_node if head_node else CORD_TEST_HOST,
-                         'MAAS_API_KEY': maas_api_key,
-                         'KARAF_VERSION' : test_manifest.karaf_version,
-                         'VOLTHA_ENABLED' : int(test_manifest.voltha_enable)
-                       }
-
-        if ssh_key_file:
-            test_cnt_env['SSH_KEY_FILE'] = ssh_key_file
-        olt_conf_test_loc = os.path.join(CordTester.sandbox_setup, os.path.basename(olt_config_file))
-        test_cnt_env['OLT_CONFIG_FILE'] = olt_conf_test_loc
-        if test_manifest.olt:
-            test_cnt_env['OLT_CONFIG'] = olt_conf_test_loc
-        if test_manifest.iterations is not None:
-            test_cnt_env['ITERATIONS'] = iterations
-        if use_manifest:
-            test_cnt_env['MANIFEST'] = os.path.join(CordTester.sandbox_setup,
-                                                    os.path.basename(args.manifest))
-
-        openstack_setup(test_cnt_env)
-
-        test_cnt = CordTester((),
-                              ctlr_ip = ctlr_addr,
-                              image = nose_cnt['image'],
-                              prefix = Container.IMAGE_PREFIX,
-                              tag = nose_cnt['tag'],
-                              env = test_cnt_env,
-                              rm = False,
-                              update = update_map['test'],
-                              network = test_manifest.docker_network,
-                              radius = radius)
-
-        if test_manifest.start_switch or not test_manifest.olt:
-            test_cnt.start_switch(test_manifest)
-        if test_cnt.olt:
-            test_cnt.setup_intfs(port_num = 0)
-        if test_manifest.setup_dhcpd and test_manifest.start_switch:
-           test_cnt.setup_dhcpd(test_manifest)
-        print('Test container %s started and provisioned to run tests using nosetests' %(test_cnt.name))
-
-    #Finally start the test server and daemonize
-    try:
-        cord_test_server_start(daemonize = not args.foreground, cord_test_host = ip, cord_test_port = port,
-                               onos_cord = onos_cord, foreground = args.foreground)
-    except socket.error, e:
-        #the test agent address could be remote or already running. Exit gracefully
-        sys.exit(0)
-
-    return 0
-
-def cleanupTests(args):
-    if args.manifest and os.access(args.manifest, os.F_OK):
-        manifest = TestManifest(manifest = args.manifest)
-        args.prefix = manifest.image_prefix
-        args.olt = manifest.olt
-        args.olt_config = manifest.olt_config
-        args.onos = manifest.onos_image
-        args.server = manifest.server
-        args.onos_ip = manifest.onos_ip
-        args.radius_ip = manifest.radius_ip
-        args.onos_cord = manifest.onos_cord
-        args.service_profile = manifest.service_profile
-        args.synchronizer = manifest.synchronizer
-        args.voltha_loc = manifest.voltha_loc
-    else:
-        args.onos_ip = None
-        args.radius_ip = None
-        if args.test_controller:
-            ips = args.test_controller.split('/')
-            args.onos_ip = ips[0]
-            if len(ips) > 1:
-                args.radius_ip = ips[1]
-
-    image_name = args.onos
-    prefix = args.prefix
-    if prefix:
-        prefix += '/'
-    test_container = '{}{}:candidate'.format(prefix, CordTester.IMAGE)
-    print('Cleaning up Test containers ...')
-    Container.cleanup(test_container)
-    if args.olt:
-        print('Cleaning up test container OLT configuration')
-        CordTester.cleanup_intfs(args.olt_config)
-
-    onos_list = [ c['Names'][0][1:] for c in Container.dckr.containers() if c['Image'] == image_name ]
-    if len(onos_list) > 1:
-        for onos in onos_list:
-            Container.dckr.kill(onos)
-            Container.dckr.remove_container(onos, force=True)
-        for index in range(len(onos_list)):
-            volume = '{}-data'.format(Onos.NAME) if index == 0 else '{}-{}-data'.format(Onos.NAME, index+1)
-            Onos.remove_data_map(volume, Onos.guest_data_dir)
-        Onos.cleanup_runtime()
-
-    radius_container = '{}{}:candidate'.format(prefix, Radius.IMAGE)
-    quagga_container = '{}{}:candidate'.format(prefix, Quagga.IMAGE)
-    Container.cleanup(radius_container)
-    radius_restore_users()
-    Container.cleanup(quagga_container)
-    if args.voltha_loc:
-        voltha = VolthaService(args.voltha_loc, args.onos_ip)
-        voltha.stop()
-
-    if args.onos_cord:
-        #try restoring the onos cord instance
-        try:
-            onos_cord = OnosCord(args.onos_ip, args.onos_cord, args.service_profile, args.synchronizer, start = False, skip = manifest.skip_onos_restart)
-            onos_cord.restore(force = True)
-        except Exception as e:
-            print(e)
-
-    if args.xos:
-        ##cleanup XOS images
-        xos_images = ( '{}:{}'.format(XosServer.IMAGE,XosServer.TAG),
-                       '{}:{}'.format(XosSynchronizerOpenstack.IMAGE,
-                                      XosSynchronizerOpenstack.TAG),
-                       '{}:{}'.format(XosSynchronizerOnboarding.IMAGE,
-                                      XosSynchronizerOnboarding.TAG),
-                       '{}:{}'.format(XosSynchronizerOpenvpn.IMAGE,
-                                      XosSynchronizerOpenvpn.TAG),
-                       '{}:{}'.format(XosPostgresql.IMAGE,
-                                      XosPostgresql.TAG),
-                       '{}:{}'.format(XosSyndicateMs.IMAGE,
-                                      XosSyndicateMs.TAG),
-                       )
-        for img in xos_images:
-            print('Cleaning up XOS image: %s' %img)
-            Container.cleanup(img)
-
-    server_params = args.server.split(':')
-    server_host = server_params[0]
-    server_port = CORD_TEST_PORT
-    if len(server_params) > 1:
-        server_port = int(server_params[1])
-    cord_test_server_shutdown(server_host, server_port)
-    return 0
-
-def listTests(args):
-    if args.test == 'all':
-        tests = CordTester.ALL_TESTS
-    else:
-        tests = args.test.split('-')
-    CordTester.list_tests(tests)
-    return 0
-
-def getMetrics(args):
-    try:
-        detail = c.inspect_container(args.container)
-    except:
-        print('Unknown container %s' %args.container)
-        return 0
-    user_hz = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
-    state = detail["State"]
-    if bool(state["Paused"]):
-       print("Container is in Paused State")
-    elif bool(state["Running"]):
-       print("Container is in Running State")
-    elif int(state["ExitCode"]) == 0:
-       print("Container is in Stopped State")
-    else:
-       print("Container is in Crashed State")
-
-    print("Ip Address of the container: " +detail['NetworkSettings']['IPAddress'])
-
-    if bool(detail["State"]["Running"]):
-        container_id = detail['Id']
-        cpu_usage = {}
-        cur_usage = 0
-        last_usage = 0
-        for i in range(2):
-            with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
-                for line in f:
-                    m = re.search(r"(system|user)\s+(\d+)", line)
-                    if m:
-                        cpu_usage[m.group(1)] = int(m.group(2))
-                cpu = cpu_usage["system"] + cpu_usage["user"]
-                last_usage = cur_usage
-                cur_usage = cpu
-                time.sleep(1)
-        cpu_percent = (cur_usage - last_usage)*100.0/user_hz
-        print("CPU Usage: %.2f %%" %(cpu_percent))
-    else:
-        print(0)
-
-    if bool(detail["State"]["Running"]):
-        container_id = detail['Id']
-        print("Docker Port Info:")
-        cmd = "sudo docker port {}".format(container_id)
-        os.system(cmd)
-
-    if bool(detail["State"]["Running"]):
-        container_id = detail['Id']
-        with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.stat', 'r') as f:
-            for line in f:
-                m = re.search(r"total_rss\s+(\d+)", line)
-                if m:
-                    mem = int(m.group(1))
-                    print("Memory: %s KB "%(mem/1024.0))
-                o = re.search(r"usage\s+(\d+)", line)
-                if o:
-                    print("Usage: %s "%(o.group(1)))
-                p = re.search(r"max_usage\s+(\d+)", line)
-                if p:
-                    print("Max Usage: %s "%(p.group(1)))
-
-    if bool(detail["State"]["Running"]):
-        container_id = detail['Id']
-        with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
-            for line in f:
-                m = re.search(r"user\s+(\d+)", line)
-                if m:
-                    user_ticks = int(m.group(1))
-                    print("Time spent by running processes: %.2f ms"%(user_ticks*1000.0/user_hz))
-    print("List Networks:")
-    cmd = "docker network ls"
-    os.system(cmd)
-    return 0
-
-def buildImages(args):
-    tag = 'candidate'
-    prefix = args.prefix
-    if prefix:
-        prefix += '/'
-    if args.image == 'all' or args.image == 'quagga':
-        image_name = '{}{}:{}'.format(prefix, Quagga.IMAGE, tag)
-        Quagga.build_image(image_name)
-
-    if args.image == 'all' or args.image == 'radius':
-        image_name = '{}{}:{}'.format(prefix, Radius.IMAGE, tag)
-        Radius.build_image(image_name)
-
-    if args.image == 'all' or args.image == 'test':
-        image_name = '{}{}:{}'.format(prefix, CordTester.IMAGE, tag)
-        CordTester.build_image(image_name)
-
-    return 0
-
-def startImages(args):
-    ##starts the latest ONOS image
-    onos_cnt = {'tag': 'latest'}
-    image_names = args.onos.rsplit(':', 1)
-    onos_cnt['image'] = image_names[0]
-    if len(image_names) > 1:
-        if image_names[1].find('/') < 0:
-            onos_cnt['tag'] = image_names[1]
-        else:
-            #tag cannot have slashes
-            onos_cnt['image'] = args.onos
-
-    if args.image == 'all' or args.image == 'onos':
-        onos = Onos(image = onos_cnt['image'], tag = onos_cnt['tag'])
-        print('ONOS started with ip %s' %(onos.ip()))
-
-    if args.image == 'all' or args.image == 'quagga':
-        quagga = Quagga(prefix = args.prefix)
-        print('Quagga started with ip %s' %(quagga.ip()))
-
-    if args.image == 'all' or args.image == 'radius':
-        radius = Radius(prefix = args.prefix)
-        print('Radius started with ip %s' %(radius.ip()))
-
-    return 0
-
-def xosCommand(args):
-    update = False
-    profile = args.profile
-    if args.command == 'update':
-        update = True
-    xos = XosServiceProfile(profile = profile, update = update)
-    if args.command == 'build':
-        xos.build_images(force = True)
-    if args.command == 'start':
-        xos.start_services()
-    if args.command == 'stop':
-        xos.stop_services(rm = True)
-    return 0
-
-if __name__ == '__main__':
-    parser = ArgumentParser(description='Cord Tester')
-    subparser = parser.add_subparsers()
-    parser_run = subparser.add_parser('run', help='Run cord tester')
-    parser_run.add_argument('-t', '--test-type', default=test_type_default, help='Specify test type or test case to run')
-    parser_run.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
-    parser_run.add_argument('-q', '--quagga',action='store_true',help='Provision quagga container for vrouter')
-    parser_run.add_argument('-a', '--app', default=onos_app_file, type=str, help='Cord ONOS app filename')
-    parser_run.add_argument('-l', '--olt', action='store_true', help='Use OLT config')
-    parser_run.add_argument('-olt-config', '--olt-config', default=olt_config_default, type=str, help='Provide OLT configuration')
-    parser_run.add_argument('-e', '--test-controller', default='', type=str, help='External test controller ip for Onos and/or radius server. '
-                        'Eg: 10.0.0.2/10.0.0.3 to specify ONOS and Radius ip to connect')
-    parser_run.add_argument('-r', '--server', default=cord_test_server_address, type=str,
-                            help='ip:port address to connect for cord test server for container requests')
-    parser_run.add_argument('-k', '--keep', action='store_true', help='Keep test container after tests')
-    parser_run.add_argument('-s', '--start-switch', action='store_true', help='Start OVS when running under OLT config')
-    parser_run.add_argument('-dh', '--setup-dhcpd', action='store_true', help='Start dhcpd Server in cord-tester test container')
-    parser_run.add_argument('-u', '--update', default='none', choices=['test','quagga','radius', 'all'], type=str, help='Update cord tester container images. '
-                        'Eg: --update=quagga to rebuild quagga image.'
-                        '    --update=radius to rebuild radius server image.'
-                        '    --update=test to rebuild cord test image.(Default)'
-                        '    --update=all to rebuild all cord tester images.')
-    parser_run.add_argument('-n', '--num-containers', default=1, type=int,
-                            help='Specify number of test containers to spawn for tests')
-    parser_run.add_argument('-c', '--container', default='', type=str, help='Test container name for running tests')
-    parser_run.add_argument('-m', '--manifest', default='', type=str, help='Provide test configuration manifest')
-    parser_run.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
-    parser_run.add_argument('-d', '--no-switch', action='store_true', help='Dont start test switch.')
-    parser_run.add_argument('-i', '--identity-file', default=identity_file_default,
-                            type=str, help='ssh identity file to access compute nodes from test container')
-    parser_run.add_argument('-j', '--onos-instances', default=1, type=int,
-                            help='Specify number to test onos instances to form cluster')
-    parser_run.add_argument('-v', '--shared-volume', action='store_true', help='Start ONOS cluster instances with shared volume')
-    parser_run.add_argument('-async', '--async-mode', action='store_true',
-                            help='Start ONOS cluster instances in async mode')
-    parser_run.add_argument('-log', '--log-level', default=onos_log_level,
-                            choices=['DEBUG','TRACE','ERROR','WARN','INFO'],
-                            type=str,
-                            help='Specify the log level for the test cases')
-    parser_run.add_argument('-jvm-heap-size', '--jvm-heap-size', default='', type=str, help='ONOS JVM heap size')
-    parser_run.add_argument('-network', '--network', default='', type=str, help='Docker network to attach')
-    parser_run.add_argument('-onos-cord', '--onos-cord', default='', type=str,
-                            help='Specify config location for ONOS cord when running on podd')
-    parser_run.add_argument('-service-profile', '--service-profile', default='', type=str,
-                            help='Specify config location for ONOS cord service profile when running on podd.'
-                            'Eg: $HOME/service-profile/cord-pod')
-    parser_run.add_argument('-synchronizer', '--synchronizer', default='', type=str,
-                            help='Specify the synchronizer to use for ONOS cord instance when running on podd.'
-                            'Eg: vtn,fabric,cord')
-    parser_run.add_argument('-karaf', '--karaf', default='3.0.8', type=str, help='Karaf version for ONOS')
-    parser_run.add_argument('-voltha-loc', '--voltha-loc', default='', type=str,
-                            help='Specify the voltha location in order to start voltha')
-    parser_run.add_argument('-voltha-intf', '--voltha-intf', default='eth0', type=str,
-                            help='Specify the voltha interface for voltha to listen')
-    parser_run.add_argument('-voltha-enable', '--voltha-enable', action='store_true',
-                            help='Run the tests with voltha environment enabled')
-    parser_run.add_argument('-voltha-container-mode', '--voltha-container-mode', action='store_true',
-                            help='Run the tests with voltha container environment enabled')
-    parser_run.add_argument('-expose-port', '--expose-port', action='store_true',
-                            help='Start ONOS by exposing the controller ports to the host.'
-                            'Add +1 for every other onos/cluster instance when running more than 1 ONOS instances')
-    parser_run.add_argument('-skip-onos-restart', '--skip-onos-restart', action='store_true',
-                            help = 'Skips restarting/configuring of onoscord')
-    parser_run.set_defaults(func=runTest)
-
-    parser_setup = subparser.add_parser('setup', help='Setup cord tester environment')
-    parser_setup.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
-    parser_setup.add_argument('-r', '--server', default=cord_test_server_address, type=str,
-                              help='ip:port address for cord test server to listen for container restart requests')
-    parser_setup.add_argument('-q', '--quagga',action='store_true',help='Provision quagga container for vrouter')
-    parser_setup.add_argument('-a', '--app', default=onos_app_file, type=str, help='Cord ONOS app filename')
-    parser_setup.add_argument('-e', '--test-controller', default='', type=str, help='External test controller ip for Onos and/or radius server. '
-                        'Eg: 10.0.0.2/10.0.0.3 to specify ONOS and Radius ip to connect')
-    parser_setup.add_argument('-u', '--update', default='none', choices=['quagga','radius', 'all'], type=str, help='Update cord tester container images. '
-                        'Eg: --update=quagga to rebuild quagga image.'
-                        '    --update=radius to rebuild radius server image.'
-                        '    --update=all to rebuild all cord tester images.')
-    parser_setup.add_argument('-d', '--dont-provision', action='store_true', help='Dont start test container.')
-    parser_setup.add_argument('-l', '--olt', action='store_true', help='Use OLT config')
-    parser_setup.add_argument('-olt-config', '--olt-config', default=olt_config_default, type=str, help='Provide OLT configuration')
-    parser_setup.add_argument('-log', '--log-level', default=onos_log_level, type=str,
-                              choices=['DEBUG','TRACE','ERROR','WARN','INFO'],
-                              help='Specify the log level for the test cases')
-    parser_setup.add_argument('-s', '--start-switch', action='store_true', help='Start OVS when running under OLT config')
-    parser_setup.add_argument('-dh', '--setup-dhcpd', action='store_true', help='Start dhcpd Server in cord-tester container')
-    parser_setup.add_argument('-onos-cord', '--onos-cord', default='', type=str,
-                              help='Specify config location for ONOS cord when running on podd')
-    parser_setup.add_argument('-service-profile', '--service-profile', default='', type=str,
-                              help='Specify config location for ONOS cord service profile when running on podd.'
-                              'Eg: $HOME/service-profile/cord-pod')
-    parser_setup.add_argument('-synchronizer', '--synchronizer', default='', type=str,
-                              help='Specify the synchronizer to use for ONOS cord instance when running on podd.'
-                              'Eg: vtn,fabric,cord')
-    parser_setup.add_argument('-m', '--manifest', default='', type=str, help='Provide test configuration manifest')
-    parser_setup.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
-    parser_setup.add_argument('-i', '--identity-file', default=identity_file_default,
-                              type=str, help='ssh identity file to access compute nodes from test container')
-    parser_setup.add_argument('-n', '--onos-instances', default=1, type=int,
-                              help='Specify number of test onos instances to spawn')
-    parser_setup.add_argument('-v', '--shared-volume', action='store_true',
-                              help='Start ONOS cluster instances with shared volume')
-    parser_setup.add_argument('-async', '--async-mode', action='store_true',
-                              help='Start ONOS cluster instances in async mode')
-    parser_setup.add_argument('-f', '--foreground', action='store_true', help='Run in foreground')
-    parser_setup.add_argument('-jvm-heap-size', '--jvm-heap-size', default='', type=str, help='ONOS JVM heap size')
-    parser_setup.add_argument('-network', '--network', default='', type=str, help='Docker network to attach')
-    parser_setup.add_argument('-karaf', '--karaf', default='3.0.8', type=str, help='Karaf version for ONOS')
-    parser_setup.add_argument('-voltha-loc', '--voltha-loc', default='', type=str,
-                              help='Specify the voltha location in order to start voltha')
-    parser_setup.add_argument('-voltha-intf', '--voltha-intf', default='eth0', type=str,
-                              help='Specify the voltha interface for voltha to listen')
-    parser_setup.add_argument('-voltha-enable', '--voltha-enable', action='store_true',
-                              help='Run the tests with voltha environment enabled')
-    parser_setup.add_argument('-voltha-container-mode', '--voltha-container-mode', action='store_true',
-                              help='Run the tests with voltha container environment enabled')
-    parser_setup.add_argument('-expose-port', '--expose-port', action='store_true',
-                              help='Start ONOS by exposing the controller ports to the host.'
-                              'Add +1 for every other onos/cluster instance when running more than 1 ONOS instances')
-    parser_setup.add_argument('-skip-onos-restart', '--skip-onos-restart', action='store_true',
-                            help = 'Skips restarting/configuring of onoscord')
-    parser_setup.set_defaults(func=setupCordTester)
-
-    parser_xos = subparser.add_parser('xos', help='Building xos into cord tester environment')
-    parser_xos.add_argument('command', choices=['build', 'update', 'start', 'stop'])
-    parser_xos.add_argument('-p', '--profile', default='cord-pod', type=str, help='Provide service profile')
-    parser_xos.set_defaults(func=xosCommand)
-
-    parser_list = subparser.add_parser('list', help='List test cases')
-    parser_list.add_argument('-t', '--test', default='all', help='Specify test type to list test cases. '
-                             'Eg: -t tls to list tls test cases.'
-                             '    -t tls-dhcp-vrouter to list tls,dhcp and vrouter test cases.'
-                             '    -t all to list all test cases.')
-    parser_list.set_defaults(func=listTests)
-
-    parser_build = subparser.add_parser('build', help='Build cord test container images')
-    parser_build.add_argument('image', choices=['quagga', 'radius', 'test','all'])
-    parser_build.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
-    parser_build.set_defaults(func=buildImages)
-
-    parser_metrics = subparser.add_parser('metrics', help='Info of container')
-    parser_metrics.add_argument("container", help="Container name")
-    parser_metrics.set_defaults(func=getMetrics)
-
-    parser_start = subparser.add_parser('start', help='Start cord tester containers')
-    parser_start.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
-    parser_start.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
-    parser_start.add_argument('image', choices=['onos', 'quagga', 'radius', 'all'])
-    parser_start.set_defaults(func=startImages)
-
-    parser_cleanup = subparser.add_parser('cleanup', help='Cleanup test containers')
-    parser_cleanup.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
-    parser_cleanup.add_argument('-l', '--olt', action = 'store_true', help = 'Cleanup OLT config')
-    parser_cleanup.add_argument('-olt-config', '--olt-config', default=olt_config_default, type=str, help='Provide OLT configuration')
-    parser_cleanup.add_argument('-o', '--onos', default=onos_image_default, type=str,
-                                help='ONOS container image to cleanup')
-    parser_cleanup.add_argument('-x', '--xos', action='store_true',
-                                help='Cleanup XOS containers')
-    parser_cleanup.add_argument('-r', '--server', default=cord_test_server_address, type=str,
-                                help='ip:port address for cord test server to cleanup')
-    parser_cleanup.add_argument('-e', '--test-controller', default='', type=str,
-                                help='External test controller ip for Onos and/or radius server. '
-                                'Eg: 10.0.0.2/10.0.0.3 to specify ONOS and Radius ip')
-    parser_cleanup.add_argument('-onos-cord', '--onos-cord', default='', type=str,
-                                help='Specify config location for ONOS cord instance when running on podd to restore')
-    parser_cleanup.add_argument('-service-profile', '--service-profile', default='', type=str,
-                                help='Specify config location for ONOS cord service profile when running on podd.'
-                                'Eg: $HOME/service-profile/cord-pod')
-    parser_cleanup.add_argument('-synchronizer', '--synchronizer', default='', type=str,
-                                help='Specify the synchronizer to use for ONOS cord instance when running on podd.'
-                                'Eg: vtn,fabric,cord')
-    parser_cleanup.add_argument('-m', '--manifest', default='', type=str, help='Provide test manifest')
-    parser_cleanup.add_argument('-voltha-loc', '--voltha-loc', default='', type=str,
-                                help='Specify the voltha location')
-    parser_cleanup.add_argument('-skip-onos-restart', '--skip-onos-restart', action='store_true',
-                            help = 'Skips restarting/configuring of onoscord')
-    parser_cleanup.set_defaults(func=cleanupTests)
-
-    c = Client(**(kwargs_from_env()))
-
-    args = parser.parse_args()
-    res = args.func(args)
-    sys.exit(res)
diff --git a/src/test/setup/cord-tester b/src/test/setup/cord-tester
deleted file mode 100755
index 786517e..0000000
--- a/src/test/setup/cord-tester
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-setup_path=$(dirname $(realpath $0))
-source $setup_path/venv/bin/activate
-$setup_path/cord-test.py $*
-
diff --git a/src/test/setup/cpqd.sh b/src/test/setup/cpqd.sh
deleted file mode 100755
index 9e70d5f..0000000
--- a/src/test/setup/cpqd.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-dpid=${1:-001122334455}
-num_ports=${2:-200}
-controller=${3:-$ONOS_CONTROLLER_IP}
-num_ports=$(($num_ports-1))
-my_ip=`ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d":" -f2 |cut -d" " -f1`
-if_list="veth1"
-for port in $(seq 3 2 $num_ports); do
-    if_list="$if_list"",""veth$port"
-done
-service openvswitch-switch stop
-nohup ofdatapath --no-slicing --datapath-id=$dpid --interfaces=$if_list ptcp:6653 2>&1 >/tmp/nohup.out &
-nohup ofprotocol tcp:$my_ip:6653 tcp:$controller:6633 2>&1 >/tmp/nohup.out &
diff --git a/src/test/setup/eval.sh b/src/test/setup/eval.sh
deleted file mode 100755
index a92caee..0000000
--- a/src/test/setup/eval.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cord_tester="$(dirname $0)/cord-test.py"
-if [ ! -f $cord_tester ]; then
-  cord_tester="$HOME/cord-tester/src/test/setup/cord-test.py"
-fi
-echo "Building all cord-tester images."
-$cord_tester build all
-docker kill cord-onos || true
-docker kill cord-quagga || true
-docker kill cord-radius || true
-function finish {
-    $cord_tester cleanup --olt
-    pkill -f cord-test
-}
-trap finish EXIT
-$cord_tester setup --olt --start-switch
-cnt=`docker ps -lq`
-echo "Running TLS authentication test"
-docker exec $cnt nosetests -v /root/test/src/test/tls/tlsTest.py:eap_auth_exchange.test_eap_tls
-echo "Running DHCP relay request test"
-docker exec $cnt nosetests -v /root/test/src/test/dhcprelay/dhcprelayTest.py:dhcprelay_exchange.test_dhcpRelay_1request
-echo "Running IGMP join verify test"
-docker exec $cnt nosetests -v /root/test/src/test/igmp/igmpTest.py:igmp_exchange.test_igmp_join_verify_traffic
-echo "Running VROUTER test with 5 routes"
-docker exec $cnt nosetests -v /root/test/src/test/vrouter/vrouterTest.py:vrouter_exchange.test_vrouter_with_5_routes
-echo "Running CORD subscriber tests"
-docker exec $cnt nosetests -v /root/test/src/test/cordSubscriber/cordSubscriberTest.py:subscriber_exchange.test_cord_subscriber_join_recv
diff --git a/src/test/setup/flask-requirements.txt b/src/test/setup/flask-requirements.txt
deleted file mode 100644
index 62e669e..0000000
--- a/src/test/setup/flask-requirements.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-flask
-flask-login
-flask-openid
-flask-mail
-flask-sqlalchemy
-sqlalchemy-migrate
-flask-whooshalchemy
-flask-wtf
-flask-babel
-guess_language
-flipflop
-coverage
diff --git a/src/test/setup/flex_fabric_test_netcfg.json b/src/test/setup/flex_fabric_test_netcfg.json
deleted file mode 100644
index 93b3db3..0000000
--- a/src/test/setup/flex_fabric_test_netcfg.json
+++ /dev/null
@@ -1,100 +0,0 @@
-{
-    "devices": {
-        "of:0000cc37ab5b6da8": {
-            "segmentrouting": {
-                "name": "device-cc37ab5b6da8",
-                "ipv4NodeSid": 100,
-                "ipv4Loopback": "10.6.0.104",
-                "routerMac": "cc:37:ab:5b:6d:a8",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        },
-        "of:0000cc37abd93769": {
-            "segmentrouting": {
-                "name": "device-cc37abd93769",
-                "ipv4NodeSid": 101,
-                "ipv4Loopback": "10.6.0.103",
-                "routerMac": "cc:37:ab:d9:37:69",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        },
-        "of:0000cc37abb6b564": {
-            "segmentrouting": {
-                "name": "device-cc37abb6b564",
-                "ipv4NodeSid": 102,
-                "ipv4Loopback": "10.6.0.102",
-                "routerMac": "cc:37:ab:b6:b5:64",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        },
-        "of:0000cc37abd9386d": {
-            "segmentrouting": {
-                "name": "device-cc37abd9386d",
-                "ipv4NodeSid": 103,
-                "ipv4Loopback": "10.6.0.101",
-                "routerMac": "cc:37:ab:d9:38:6d",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        }
-    },
-    "ports": {
-        "of:0000cc37abb6b564/2": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.2.254/24" ],
-                    "vlan-untagged" : 2
-                }
-            ]
-        },
-        "of:0000cc37abd93769/2": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.1.254/24" ],
-                    "vlan-untagged" : 1
-                }
-            ]
-        },
-        "of:0000cc37abb6b564/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.2.254/24" ],
-                    "vlan-untagged" : 2
-                }
-            ]
-        },
-        "of:0000cc37abd93769/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.1.254/24" ],
-                    "vlan-untagged" : 1
-                }
-            ]
-        }
-    },
-    "apps" : {
-        "org.onosproject.segmentrouting" : {
-            "segmentrouting" : {
-                "vRouterMacs" : [ "a4:23:05:06:01:01" ]
-            },
-            "xconnect": {
-              "of:0000cc37abd93769": [{
-                "vlan": 333,
-                "ports": [1, 2],
-                "name": "vsg-1"
-                },
-                {
-                "vlan": 555,
-                "ports": [1, 2],
-                "name": "vsg-2"},
-                {
-                "vlan": 666,
-                "ports": [1, 2],
-                "name": "vsg-3"}]
-           }
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/test/setup/manifest-cluster.json b/src/test/setup/manifest-cluster.json
deleted file mode 100644
index e311473..0000000
--- a/src/test/setup/manifest-cluster.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "onos_instances": 3,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8"
-}
diff --git a/src/test/setup/manifest-cord.json b/src/test/setup/manifest-cord.json
deleted file mode 100644
index 010033d..0000000
--- a/src/test/setup/manifest-cord.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "onos" : "172.21.0.2",
-    "onos_cord" : "/opt/onos_cord",
-    "service_profile" : "rcord",
-    "synchronizer" : "vtn",
-    "docker_network" : "onoscord_default",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8"
-}
diff --git a/src/test/setup/manifest-fabric.json b/src/test/setup/manifest-fabric.json
deleted file mode 100644
index 129f479..0000000
--- a/src/test/setup/manifest-fabric.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "onos" : "172.20.0.2",
-    "onos_cord" : "/home/vagrant/onos_fabric",
-    "service_profile" : "rcord",
-    "synchronizer" : "fabric",
-    "docker_network" : "onosfabric_default",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.5"
-}
diff --git a/src/test/setup/manifest-olt-voltha.json b/src/test/setup/manifest-olt-voltha.json
deleted file mode 100644
index 7629045..0000000
--- a/src/test/setup/manifest-olt-voltha.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "olt_config" : "olt_config_voltha.json",
-    "start_switch": false,
-    "onos" : "10.70.46.93",
-    "radius" : "172.23.0.3",
-    "onos_image": "onosproject/onos:latest",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.5",
-    "voltha_enable" : true
-}
diff --git a/src/test/setup/manifest-onf-cord.json b/src/test/setup/manifest-onf-cord.json
deleted file mode 100644
index b8445e2..0000000
--- a/src/test/setup/manifest-onf-cord.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "onos" : "172.21.0.2",
-    "onos_cord" : "/opt/onos_cord",
-    "service_profile" : "rcord",
-    "synchronizer" : "vtn",
-    "docker_network" : "onoscord_default",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8",
-    "skip_onos_restart": "true"
-}
diff --git a/src/test/setup/manifest-ponsim.json b/src/test/setup/manifest-ponsim.json
deleted file mode 100644
index dfc73b1..0000000
--- a/src/test/setup/manifest-ponsim.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "olt_config" : "olt_config_ponsim.json",
-    "start_switch": false,
-    "onos_image": "onosproject/onos:latest",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8",
-    "voltha_loc" : "/home/ubuntu/voltha/incubator/voltha",
-    "voltha_intf" : "ponmgmt",
-    "expose_port" : true
-}
diff --git a/src/test/setup/manifest-voltha.json b/src/test/setup/manifest-voltha.json
deleted file mode 100644
index acbbb39..0000000
--- a/src/test/setup/manifest-voltha.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8",
-    "voltha_loc" : "/home/ubuntu/voltha/incubator/voltha",
-    "voltha_intf" : "eth0",
-    "expose_port" : true
-}
diff --git a/src/test/setup/manifest.json b/src/test/setup/manifest.json
deleted file mode 100644
index a957602..0000000
--- a/src/test/setup/manifest.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "onos_instances": 1,
-    "olt": true,
-    "start_switch": true,
-    "onos_image": "onosproject/onos:latest",
-    "log_level" : "INFO",
-    "jvm_heap_size" : "1G",
-    "karaf_version" : "3.0.8"
-}
diff --git a/src/test/setup/of-bridge-local.sh b/src/test/setup/of-bridge-local.sh
deleted file mode 100755
index 3a54962..0000000
--- a/src/test/setup/of-bridge-local.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-bridge="$1"
-controller="$2"
-voltha_loc="$3"
-if [ x"$bridge" = "x" ]; then
-  bridge="ovsbr0"
-fi
-if [ x"$controller" = "x" ]; then
-  controller="$ONOS_CONTROLLER_IP"
-fi
-pkill -9 ofdatapath
-pkill -9 ofprotocol
-service openvswitch-switch restart
-bridges=()
-num_bridges=1
-if [[ $bridge =~ ^[0-9]+$ ]]; then
-    num_bridges=$bridge
-    if [ $num_bridges -eq 0 ]; then
-        num_bridges=1
-    fi
-    for num in $(seq $num_bridges); do
-        if [ $num -eq 1 ]; then
-            br=br-int
-        else
-            br=br-int$num
-        fi
-        n=$(($num-1))
-        bridges[$n]=$br
-    done
-else
-    bridges[0]=$bridge
-fi
-
-#Delete existing bridges if any
-for br in "${bridges[@]}"; do
-    ovs-vsctl del-br $br
-done
-
-proto=tcp
-if [ x"$voltha_loc" != "x" ]; then
-    onos_jks="$voltha_loc/docker/onos_cfg/onos.jks"
-    client_cert="$voltha_loc/pki/voltha.crt"
-    if [ -f $onos_jks ]; then
-        #extract server certificate
-        keytool -export -alias onos -file /tmp/onos.der -keystore $onos_jks -storepass 222222
-        openssl x509 -inform der -in /tmp/onos.der -out /tmp/onos-cert.pem
-        cat /tmp/onos-cert.pem $client_cert > /tmp/voltha-CA.pem
-        echo "Enabling OVS SSL connection to controller"
-        ovs-vsctl set-ssl $voltha_loc/pki/voltha.key $client_cert /tmp/voltha-CA.pem
-        proto=ssl
-    fi
-fi
-
-ctlr=""
-for ip in `echo $controller | tr ',' '\n'`; do
-  ctlr="$ctlr $proto:$ip:6653"
-done
-
-for br in "${bridges[@]}"; do
-    echo "Configuring OVS bridge:$br"
-    ovs-vsctl add-br $br
-    ovs-vsctl set-controller $br $ctlr
-    ovs-vsctl set controller $br max_backoff=1000
-    ovs-vsctl set bridge $br protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
-done
-
-for br in "${bridges[@]}"; do
-    ovs-vsctl show
-    ovs-ofctl show $br
-done
diff --git a/src/test/setup/of-bridge-template.sh b/src/test/setup/of-bridge-template.sh
deleted file mode 100755
index f150201..0000000
--- a/src/test/setup/of-bridge-template.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-bridge="$1"
-controller="$2"
-if [ x"$bridge" = "x" ]; then
-  bridge="br0"
-fi
-if [ x"$controller" = "x" ]; then
-  controller="%%CONTROLLER%%"
-fi
-service openvswitch-switch restart
-num_ports=200
-ports=$(($num_ports-1))
-for vports in $(seq 0 2 $ports); do
-   echo "Deleting veth$vports"
-   ip link del veth$vports
-done
-for vports in $(seq 0 2 $ports); do
-  ip link add type veth
-  ifconfig veth$vports up
-  ifconfig veth$(($vports+1)) up
-done
-echo "Configuring ovs bridge $bridge"
-ovs-vsctl del-br $bridge
-ovs-vsctl add-br $bridge
-for i in $(seq 1 2 $ports); do
-  ovs-vsctl add-port $bridge veth$i
-done
-my_ip=`ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d":" -f2 |cut -d" " -f1`
-ovs-vsctl set-controller $bridge ptcp:6653:$my_ip tcp:$controller:6633
-ovs-vsctl set controller $bridge max_backoff=1000
-ovs-vsctl set bridge $bridge protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
-ovs-vsctl show
-ovs-ofctl show $bridge
diff --git a/src/test/setup/of-bridge.sh b/src/test/setup/of-bridge.sh
deleted file mode 100755
index cd56e6c..0000000
--- a/src/test/setup/of-bridge.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-bridge="$1"
-controller="$2"
-if [ x"$bridge" = "x" ]; then
-  bridge="ovsbr0"
-fi
-if [ x"$controller" = "x" ]; then
-  controller="$ONOS_CONTROLLER_IP"
-fi
-pkill -9 ofdatapath
-pkill -9 ofprotocol
-service openvswitch-switch restart
-num_ports=200
-ports=$(($num_ports-1))
-for vports in $(seq 0 2 $ports); do
-   echo "Deleting veth$vports"
-   ip link del veth$vports 2>/dev/null
-done
-for vports in $(seq 0 2 $ports); do
-  ip link add type veth
-  ifconfig veth$vports up
-  ifconfig veth$(($vports+1)) up
-done
-echo "Configuring ovs bridge $bridge"
-ovs-vsctl del-br $bridge
-ovs-vsctl add-br $bridge
-#ovs-vsctl set bridge $bridge other-config:hwaddr=00:11:22:33:44:55
-for i in $(seq 1 2 $ports); do
-  ovs-vsctl add-port $bridge veth$i
-done
-ctlr=""
-for ip in `echo $controller | tr ',' '\n'`; do
-  ctlr="$ctlr tcp:$ip:6653"
-done
-ovs-vsctl set-controller $bridge $ctlr
-ovs-vsctl set controller $bridge max_backoff=1000
-ovs-vsctl set bridge $bridge protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
-ovs-vsctl show
-ovs-ofctl show $bridge
diff --git a/src/test/setup/olt_config.json b/src/test/setup/olt_config.json
deleted file mode 100644
index 35ab577..0000000
--- a/src/test/setup/olt_config.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{ "olt" : false,
-  "uplink" : 2,
-  "vlan" : 0,
-  "port_map" : { "num_ports" : 11, "start_vlan" : 1000, "nr_switches": 1 },
-  "vcpe" :
-  [
-        {
-          "port" : "fabric", "type" : "reserved", "s_tag" : 222, "c_tag" : 111
-        },
-        {
-          "port" : "fabric", "type" : "reserved", "s_tag" : 333, "c_tag" : 888
-        },
-        {
-          "port" : "fabric", "type" : "reserved", "s_tag" : 555, "c_tag" : 999
-        },
-        {
-          "port" : "fabric", "type" : "reserved", "s_tag" : 666, "c_tag" : 661
-        },
-        {
-          "port" : "fabric", "type" : "dhcp", "s_tag" : 304, "c_tag" : 307
-        },
-        {
-          "port" : "fabric", "type" : "dhcp", "s_tag" : 304, "c_tag" : 308
-        },
-        {
-          "port" : "fabric", "type" : "dhcp", "s_tag" : 304, "c_tag" : 309
-        },
-        {
-          "port" : "fabric", "type" : "dhcp", "s_tag" : 304, "c_tag" : 310
-        },
-        {
-          "port" : "fabric", "type" : "dhcp", "s_tag" : 304, "c_tag" : 311
-        }
-      ]
-}
diff --git a/src/test/setup/olt_config_ponsim.json b/src/test/setup/olt_config_ponsim.json
deleted file mode 100644
index 6c5737d..0000000
--- a/src/test/setup/olt_config_ponsim.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{ "olt" : false,
-  "uplink" : 2,
-  "vlan" : 0,
-  "port_map" : { "num_ports" : 11, "start_vlan" : 0, "nr_switches": 1, "host" : "pon1_128", "ponsim" : true},
-  "vcpe" :
-  [
-        {
-          "port" : "enp0s9", "type" : "reserved", "s_tag" : 222, "c_tag" : 128
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 304
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 305
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 306
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 307
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 308
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 309
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 310
-        },
-        {
-          "port" : "enp0s9", "type" : "dhcp", "s_tag" : 304, "c_tag" : 311
-        }
-      ]
-}
diff --git a/src/test/setup/olt_config_voltha.json b/src/test/setup/olt_config_voltha.json
deleted file mode 100644
index 01ba4e7..0000000
--- a/src/test/setup/olt_config_voltha.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{ "olt" : false,
-  "uplink" : 2,
-  "vlan" : 0,
-  "port_map" : { "ports": [ "veth0", "veth2", "veth4", "veth6", "veth8", "veth10", "veth12", "veth14", "veth16", "veth18", "veth20", "veth22" ], "start_vlan" : 0, "host": "enp1s0f1" },
-  "vcpe" :
-  [
-        {
-          "port" : "enp1s0f1", "type" : "reserved", "s_tag" : 222, "c_tag" : 111
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 304
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 305
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 306
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 307
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 308
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 309
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 310
-        },
-        {
-          "port" : "enp1s0f1", "type" : "dhcp", "s_tag" : 304, "c_tag" : 311
-        }
-      ]
-}
diff --git a/src/test/setup/onos-config/cluster.json b/src/test/setup/onos-config/cluster.json
deleted file mode 100644
index 72ae4c0..0000000
--- a/src/test/setup/onos-config/cluster.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "nodes": [
-        {
-            "ip": "172.17.0.2",
-            "id": "172.17.0.2",
-            "port": 9876
-        }
-    ],
-    "name": 4220150786,
-    "partitions": [
-        {
-            "id": 1,
-            "members": [
-                "172.17.0.2"
-            ]
-        }
-    ]
-}
diff --git a/src/test/setup/onos-config/network-cfg-sample.json b/src/test/setup/onos-config/network-cfg-sample.json
deleted file mode 100644
index b858540..0000000
--- a/src/test/setup/onos-config/network-cfg-sample.json
+++ /dev/null
@@ -1 +0,0 @@
-{"apps": {"org.onosproject.router": {"router": {"ospfEnabled": true, "interfaces": ["b1-1", "b1-2", "b1-3", "b1-4", "b1-5", "b1-6", "b1-7", "b1-8", "b1-9", "b1-10", "b1-11", "b1-12", "b1-13", "b1-14", "b1-15", "b1-16", "b1-17", "b1-18", "b1-19", "b1-20", "b1-21", "b1-22", "b1-23", "b1-24", "b1-25", "b1-26", "b1-27", "b1-28", "b1-29", "b1-30", "b1-31", "b1-32", "b1-33", "b1-34", "b1-35", "b1-36", "b1-37", "b1-38", "b1-39", "b1-40", "b1-41", "b1-42", "b1-43", "b1-44", "b1-45", "b1-46", "b1-47", "b1-48", "b1-49", "b1-50", "b1-51", "b1-52", "b1-53", "b1-54", "b1-55", "b1-56", "b1-57", "b1-58", "b1-59", "b1-60", "b1-61", "b1-62", "b1-63", "b1-64", "b1-65", "b1-66", "b1-67", "b1-68", "b1-69", "b1-70", "b1-71", "b1-72", "b1-73", "b1-74", "b1-75", "b1-76", "b1-77", "b1-78", "b1-79", "b1-80", "b1-81", "b1-82", "b1-83", "b1-84", "b1-85", "b1-86", "b1-87", "b1-88", "b1-89", "b1-90", "b1-91", "b1-92", "b1-93", "b1-94", "b1-95", "b1-96", "b1-97", "b1-98", "b1-99"], "controlPlaneConnectPoint": "of:000002ddebbeb549/100"}}}, "ports": {"of:000002ddebbeb549/19": {"interfaces": [{"ips": ["11.0.18.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-19"}]}, "of:000002ddebbeb549/18": {"interfaces": [{"ips": ["11.0.17.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-18"}]}, "of:000002ddebbeb549/11": {"interfaces": [{"ips": ["11.0.10.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-11"}]}, "of:000002ddebbeb549/10": {"interfaces": [{"ips": ["11.0.9.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-10"}]}, "of:000002ddebbeb549/13": {"interfaces": [{"ips": ["11.0.12.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-13"}]}, "of:000002ddebbeb549/12": {"interfaces": [{"ips": ["11.0.11.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-12"}]}, "of:000002ddebbeb549/15": {"interfaces": [{"ips": ["11.0.14.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-15"}]}, "of:000002ddebbeb549/14": {"interfaces": [{"ips": ["11.0.13.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-14"}]}, "of:000002ddebbeb549/17": 
{"interfaces": [{"ips": ["11.0.16.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-17"}]}, "of:000002ddebbeb549/16": {"interfaces": [{"ips": ["11.0.15.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-16"}]}, "of:000002ddebbeb549/82": {"interfaces": [{"ips": ["11.0.81.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-82"}]}, "of:000002ddebbeb549/83": {"interfaces": [{"ips": ["11.0.82.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-83"}]}, "of:000002ddebbeb549/80": {"interfaces": [{"ips": ["11.0.79.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-80"}]}, "of:000002ddebbeb549/81": {"interfaces": [{"ips": ["11.0.80.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-81"}]}, "of:000002ddebbeb549/86": {"interfaces": [{"ips": ["11.0.85.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-86"}]}, "of:000002ddebbeb549/87": {"interfaces": [{"ips": ["11.0.86.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-87"}]}, "of:000002ddebbeb549/84": {"interfaces": [{"ips": ["11.0.83.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-84"}]}, "of:000002ddebbeb549/85": {"interfaces": [{"ips": ["11.0.84.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-85"}]}, "of:000002ddebbeb549/88": {"interfaces": [{"ips": ["11.0.87.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-88"}]}, "of:000002ddebbeb549/89": {"interfaces": [{"ips": ["11.0.88.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-89"}]}, "of:000002ddebbeb549/1": {"interfaces": [{"ips": ["11.0.0.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-1"}]}, "of:000002ddebbeb549/3": {"interfaces": [{"ips": ["11.0.2.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-3"}]}, "of:000002ddebbeb549/2": {"interfaces": [{"ips": ["11.0.1.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-2"}]}, "of:000002ddebbeb549/5": {"interfaces": [{"ips": ["11.0.4.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-5"}]}, "of:000002ddebbeb549/4": {"interfaces": [{"ips": ["11.0.3.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-4"}]}, "of:000002ddebbeb549/7": {"interfaces": [{"ips": ["11.0.6.2/24"], 
"mac": "00:00:00:00:00:01", "name": "b1-7"}]}, "of:000002ddebbeb549/6": {"interfaces": [{"ips": ["11.0.5.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-6"}]}, "of:000002ddebbeb549/9": {"interfaces": [{"ips": ["11.0.8.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-9"}]}, "of:000002ddebbeb549/8": {"interfaces": [{"ips": ["11.0.7.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-8"}]}, "of:000002ddebbeb549/91": {"interfaces": [{"ips": ["11.0.90.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-91"}]}, "of:000002ddebbeb549/90": {"interfaces": [{"ips": ["11.0.89.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-90"}]}, "of:000002ddebbeb549/93": {"interfaces": [{"ips": ["11.0.92.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-93"}]}, "of:000002ddebbeb549/92": {"interfaces": [{"ips": ["11.0.91.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-92"}]}, "of:000002ddebbeb549/95": {"interfaces": [{"ips": ["11.0.94.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-95"}]}, "of:000002ddebbeb549/94": {"interfaces": [{"ips": ["11.0.93.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-94"}]}, "of:000002ddebbeb549/97": {"interfaces": [{"ips": ["11.0.96.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-97"}]}, "of:000002ddebbeb549/96": {"interfaces": [{"ips": ["11.0.95.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-96"}]}, "of:000002ddebbeb549/99": {"interfaces": [{"ips": ["11.0.98.2/24", "11.0.99.2/24", "11.0.100.2/24", "11.0.101.2/24", "11.0.102.2/24", "11.0.103.2/24", "11.0.104.2/24", "11.0.105.2/24", "11.0.106.2/24", "11.0.107.2/24", "11.0.108.2/24", "11.0.109.2/24", "11.0.110.2/24", "11.0.111.2/24", "11.0.112.2/24", "11.0.113.2/24", "11.0.114.2/24", "11.0.115.2/24", "11.0.116.2/24", "11.0.117.2/24", "11.0.118.2/24", "11.0.119.2/24", "11.0.120.2/24", "11.0.121.2/24", "11.0.122.2/24", "11.0.123.2/24", "11.0.124.2/24", "11.0.125.2/24", "11.0.126.2/24", "11.0.127.2/24", "11.0.128.2/24", "11.0.129.2/24", "11.0.130.2/24", "11.0.131.2/24", "11.0.132.2/24", "11.0.133.2/24", "11.0.134.2/24", 
"11.0.135.2/24", "11.0.136.2/24", "11.0.137.2/24", "11.0.138.2/24", "11.0.139.2/24", "11.0.140.2/24", "11.0.141.2/24", "11.0.142.2/24", "11.0.143.2/24", "11.0.144.2/24", "11.0.145.2/24", "11.0.146.2/24", "11.0.147.2/24", "11.0.148.2/24", "11.0.149.2/24", "11.0.150.2/24", "11.0.151.2/24", "11.0.152.2/24", "11.0.153.2/24", "11.0.154.2/24", "11.0.155.2/24", "11.0.156.2/24", "11.0.157.2/24", "11.0.158.2/24", "11.0.159.2/24", "11.0.160.2/24", "11.0.161.2/24", "11.0.162.2/24", "11.0.163.2/24", "11.0.164.2/24", "11.0.165.2/24", "11.0.166.2/24", "11.0.167.2/24", "11.0.168.2/24", "11.0.169.2/24", "11.0.170.2/24", "11.0.171.2/24", "11.0.172.2/24", "11.0.173.2/24", "11.0.174.2/24", "11.0.175.2/24", "11.0.176.2/24", "11.0.177.2/24", "11.0.178.2/24", "11.0.179.2/24", "11.0.180.2/24", "11.0.181.2/24", "11.0.182.2/24", "11.0.183.2/24", "11.0.184.2/24", "11.0.185.2/24", "11.0.186.2/24", "11.0.187.2/24", "11.0.188.2/24", "11.0.189.2/24", "11.0.190.2/24", "11.0.191.2/24", "11.0.192.2/24", "11.0.193.2/24", "11.0.194.2/24", "11.0.195.2/24", "11.0.196.2/24", "11.0.197.2/24", "11.0.198.2/24", "11.0.199.2/24", "11.0.200.2/24", "11.0.201.2/24", "11.0.202.2/24", "11.0.203.2/24", "11.0.204.2/24", "11.0.205.2/24", "11.0.206.2/24", "11.0.207.2/24", "11.0.208.2/24", "11.0.209.2/24", "11.0.210.2/24", "11.0.211.2/24", "11.0.212.2/24", "11.0.213.2/24", "11.0.214.2/24", "11.0.215.2/24", "11.0.216.2/24", "11.0.217.2/24", "11.0.218.2/24", "11.0.219.2/24", "11.0.220.2/24", "11.0.221.2/24", "11.0.222.2/24", "11.0.223.2/24", "11.0.224.2/24", "11.0.225.2/24", "11.0.226.2/24", "11.0.227.2/24", "11.0.228.2/24", "11.0.229.2/24", "11.0.230.2/24", "11.0.231.2/24", "11.0.232.2/24", "11.0.233.2/24", "11.0.234.2/24", "11.0.235.2/24", "11.0.236.2/24", "11.0.237.2/24", "11.0.238.2/24", "11.0.239.2/24", "11.0.240.2/24", "11.0.241.2/24", "11.0.242.2/24", "11.0.243.2/24", "11.0.244.2/24", "11.0.245.2/24", "11.0.246.2/24", "11.0.247.2/24", "11.0.248.2/24", "11.0.249.2/24", "11.0.250.2/24", "11.0.251.2/24", 
"11.0.252.2/24", "11.0.253.2/24", "11.0.254.2/24", "11.0.255.2/24", "11.1.0.2/24", "11.1.1.2/24", "11.1.2.2/24", "11.1.3.2/24", "11.1.4.2/24", "11.1.5.2/24", "11.1.6.2/24", "11.1.7.2/24", "11.1.8.2/24", "11.1.9.2/24", "11.1.10.2/24", "11.1.11.2/24", "11.1.12.2/24", "11.1.13.2/24", "11.1.14.2/24", "11.1.15.2/24", "11.1.16.2/24", "11.1.17.2/24", "11.1.18.2/24", "11.1.19.2/24", "11.1.20.2/24", "11.1.21.2/24", "11.1.22.2/24", "11.1.23.2/24", "11.1.24.2/24", "11.1.25.2/24", "11.1.26.2/24", "11.1.27.2/24", "11.1.28.2/24", "11.1.29.2/24", "11.1.30.2/24", "11.1.31.2/24", "11.1.32.2/24", "11.1.33.2/24", "11.1.34.2/24", "11.1.35.2/24", "11.1.36.2/24", "11.1.37.2/24", "11.1.38.2/24", "11.1.39.2/24", "11.1.40.2/24", "11.1.41.2/24", "11.1.42.2/24", "11.1.43.2/24", "11.1.44.2/24", "11.1.45.2/24", "11.1.46.2/24", "11.1.47.2/24", "11.1.48.2/24", "11.1.49.2/24", "11.1.50.2/24", "11.1.51.2/24", "11.1.52.2/24", "11.1.53.2/24", "11.1.54.2/24", "11.1.55.2/24", "11.1.56.2/24", "11.1.57.2/24", "11.1.58.2/24", "11.1.59.2/24", "11.1.60.2/24", "11.1.61.2/24", "11.1.62.2/24", "11.1.63.2/24", "11.1.64.2/24", "11.1.65.2/24", "11.1.66.2/24", "11.1.67.2/24", "11.1.68.2/24", "11.1.69.2/24", "11.1.70.2/24", "11.1.71.2/24", "11.1.72.2/24", "11.1.73.2/24", "11.1.74.2/24", "11.1.75.2/24", "11.1.76.2/24", "11.1.77.2/24", "11.1.78.2/24", "11.1.79.2/24", "11.1.80.2/24", "11.1.81.2/24", "11.1.82.2/24", "11.1.83.2/24", "11.1.84.2/24", "11.1.85.2/24", "11.1.86.2/24", "11.1.87.2/24", "11.1.88.2/24", "11.1.89.2/24", "11.1.90.2/24", "11.1.91.2/24", "11.1.92.2/24", "11.1.93.2/24", "11.1.94.2/24", "11.1.95.2/24", "11.1.96.2/24", "11.1.97.2/24", "11.1.98.2/24", "11.1.99.2/24", "11.1.100.2/24", "11.1.101.2/24", "11.1.102.2/24", "11.1.103.2/24", "11.1.104.2/24", "11.1.105.2/24", "11.1.106.2/24", "11.1.107.2/24", "11.1.108.2/24", "11.1.109.2/24", "11.1.110.2/24", "11.1.111.2/24", "11.1.112.2/24", "11.1.113.2/24", "11.1.114.2/24", "11.1.115.2/24", "11.1.116.2/24", "11.1.117.2/24", "11.1.118.2/24", "11.1.119.2/24", 
"11.1.120.2/24", "11.1.121.2/24", "11.1.122.2/24", "11.1.123.2/24", "11.1.124.2/24", "11.1.125.2/24", "11.1.126.2/24", "11.1.127.2/24", "11.1.128.2/24", "11.1.129.2/24", "11.1.130.2/24", "11.1.131.2/24", "11.1.132.2/24", "11.1.133.2/24", "11.1.134.2/24", "11.1.135.2/24", "11.1.136.2/24", "11.1.137.2/24", "11.1.138.2/24", "11.1.139.2/24", "11.1.140.2/24", "11.1.141.2/24", "11.1.142.2/24", "11.1.143.2/24", "11.1.144.2/24", "11.1.145.2/24", "11.1.146.2/24", "11.1.147.2/24", "11.1.148.2/24", "11.1.149.2/24", "11.1.150.2/24", "11.1.151.2/24", "11.1.152.2/24", "11.1.153.2/24", "11.1.154.2/24", "11.1.155.2/24", "11.1.156.2/24", "11.1.157.2/24", "11.1.158.2/24", "11.1.159.2/24", "11.1.160.2/24", "11.1.161.2/24", "11.1.162.2/24", "11.1.163.2/24", "11.1.164.2/24", "11.1.165.2/24", "11.1.166.2/24", "11.1.167.2/24", "11.1.168.2/24", "11.1.169.2/24", "11.1.170.2/24", "11.1.171.2/24", "11.1.172.2/24", "11.1.173.2/24", "11.1.174.2/24", "11.1.175.2/24", "11.1.176.2/24", "11.1.177.2/24", "11.1.178.2/24", "11.1.179.2/24", "11.1.180.2/24", "11.1.181.2/24", "11.1.182.2/24", "11.1.183.2/24", "11.1.184.2/24", "11.1.185.2/24", "11.1.186.2/24", "11.1.187.2/24", "11.1.188.2/24", "11.1.189.2/24", "11.1.190.2/24", "11.1.191.2/24", "11.1.192.2/24", "11.1.193.2/24", "11.1.194.2/24", "11.1.195.2/24", "11.1.196.2/24", "11.1.197.2/24", "11.1.198.2/24", "11.1.199.2/24", "11.1.200.2/24", "11.1.201.2/24", "11.1.202.2/24", "11.1.203.2/24", "11.1.204.2/24", "11.1.205.2/24", "11.1.206.2/24", "11.1.207.2/24", "11.1.208.2/24", "11.1.209.2/24", "11.1.210.2/24", "11.1.211.2/24", "11.1.212.2/24", "11.1.213.2/24", "11.1.214.2/24", "11.1.215.2/24", "11.1.216.2/24", "11.1.217.2/24", "11.1.218.2/24", "11.1.219.2/24", "11.1.220.2/24", "11.1.221.2/24", "11.1.222.2/24", "11.1.223.2/24", "11.1.224.2/24", "11.1.225.2/24", "11.1.226.2/24", "11.1.227.2/24", "11.1.228.2/24", "11.1.229.2/24", "11.1.230.2/24", "11.1.231.2/24", "11.1.232.2/24", "11.1.233.2/24", "11.1.234.2/24", "11.1.235.2/24", "11.1.236.2/24", 
"11.1.237.2/24", "11.1.238.2/24", "11.1.239.2/24", "11.1.240.2/24", "11.1.241.2/24", "11.1.242.2/24", "11.1.243.2/24", "11.1.244.2/24", "11.1.245.2/24", "11.1.246.2/24", "11.1.247.2/24", "11.1.248.2/24", "11.1.249.2/24", "11.1.250.2/24", "11.1.251.2/24", "11.1.252.2/24", "11.1.253.2/24", "11.1.254.2/24", "11.1.255.2/24", "11.2.0.2/24", "11.2.1.2/24", "11.2.2.2/24", "11.2.3.2/24", "11.2.4.2/24", "11.2.5.2/24", "11.2.6.2/24", "11.2.7.2/24", "11.2.8.2/24", "11.2.9.2/24", "11.2.10.2/24", "11.2.11.2/24", "11.2.12.2/24", "11.2.13.2/24", "11.2.14.2/24", "11.2.15.2/24", "11.2.16.2/24", "11.2.17.2/24", "11.2.18.2/24", "11.2.19.2/24", "11.2.20.2/24", "11.2.21.2/24", "11.2.22.2/24", "11.2.23.2/24", "11.2.24.2/24", "11.2.25.2/24", "11.2.26.2/24", "11.2.27.2/24", "11.2.28.2/24", "11.2.29.2/24", "11.2.30.2/24", "11.2.31.2/24", "11.2.32.2/24", "11.2.33.2/24", "11.2.34.2/24", "11.2.35.2/24", "11.2.36.2/24", "11.2.37.2/24", "11.2.38.2/24", "11.2.39.2/24", "11.2.40.2/24", "11.2.41.2/24", "11.2.42.2/24", "11.2.43.2/24", "11.2.44.2/24", "11.2.45.2/24", "11.2.46.2/24", "11.2.47.2/24", "11.2.48.2/24", "11.2.49.2/24", "11.2.50.2/24", "11.2.51.2/24", "11.2.52.2/24", "11.2.53.2/24", "11.2.54.2/24", "11.2.55.2/24", "11.2.56.2/24", "11.2.57.2/24", "11.2.58.2/24", "11.2.59.2/24", "11.2.60.2/24", "11.2.61.2/24", "11.2.62.2/24", "11.2.63.2/24", "11.2.64.2/24", "11.2.65.2/24", "11.2.66.2/24", "11.2.67.2/24", "11.2.68.2/24", "11.2.69.2/24", "11.2.70.2/24", "11.2.71.2/24", "11.2.72.2/24", "11.2.73.2/24", "11.2.74.2/24", "11.2.75.2/24", "11.2.76.2/24", "11.2.77.2/24", "11.2.78.2/24", "11.2.79.2/24", "11.2.80.2/24", "11.2.81.2/24", "11.2.82.2/24", "11.2.83.2/24", "11.2.84.2/24", "11.2.85.2/24", "11.2.86.2/24", "11.2.87.2/24", "11.2.88.2/24", "11.2.89.2/24", "11.2.90.2/24", "11.2.91.2/24", "11.2.92.2/24", "11.2.93.2/24", "11.2.94.2/24", "11.2.95.2/24", "11.2.96.2/24", "11.2.97.2/24", "11.2.98.2/24", "11.2.99.2/24", "11.2.100.2/24", "11.2.101.2/24", "11.2.102.2/24", "11.2.103.2/24", "11.2.104.2/24", 
"11.2.105.2/24", "11.2.106.2/24", "11.2.107.2/24", "11.2.108.2/24", "11.2.109.2/24", "11.2.110.2/24", "11.2.111.2/24", "11.2.112.2/24", "11.2.113.2/24", "11.2.114.2/24", "11.2.115.2/24", "11.2.116.2/24", "11.2.117.2/24", "11.2.118.2/24", "11.2.119.2/24", "11.2.120.2/24", "11.2.121.2/24", "11.2.122.2/24", "11.2.123.2/24", "11.2.124.2/24", "11.2.125.2/24", "11.2.126.2/24", "11.2.127.2/24", "11.2.128.2/24", "11.2.129.2/24", "11.2.130.2/24", "11.2.131.2/24", "11.2.132.2/24", "11.2.133.2/24", "11.2.134.2/24", "11.2.135.2/24", "11.2.136.2/24", "11.2.137.2/24", "11.2.138.2/24", "11.2.139.2/24", "11.2.140.2/24", "11.2.141.2/24", "11.2.142.2/24", "11.2.143.2/24", "11.2.144.2/24", "11.2.145.2/24", "11.2.146.2/24", "11.2.147.2/24", "11.2.148.2/24", "11.2.149.2/24", "11.2.150.2/24", "11.2.151.2/24", "11.2.152.2/24", "11.2.153.2/24", "11.2.154.2/24", "11.2.155.2/24", "11.2.156.2/24", "11.2.157.2/24", "11.2.158.2/24", "11.2.159.2/24", "11.2.160.2/24", "11.2.161.2/24", "11.2.162.2/24", "11.2.163.2/24", "11.2.164.2/24", "11.2.165.2/24", "11.2.166.2/24", "11.2.167.2/24", "11.2.168.2/24", "11.2.169.2/24", "11.2.170.2/24", "11.2.171.2/24", "11.2.172.2/24", "11.2.173.2/24", "11.2.174.2/24", "11.2.175.2/24", "11.2.176.2/24", "11.2.177.2/24", "11.2.178.2/24", "11.2.179.2/24", "11.2.180.2/24", "11.2.181.2/24", "11.2.182.2/24", "11.2.183.2/24", "11.2.184.2/24", "11.2.185.2/24", "11.2.186.2/24", "11.2.187.2/24", "11.2.188.2/24", "11.2.189.2/24", "11.2.190.2/24", "11.2.191.2/24", "11.2.192.2/24", "11.2.193.2/24", "11.2.194.2/24", "11.2.195.2/24", "11.2.196.2/24", "11.2.197.2/24", "11.2.198.2/24", "11.2.199.2/24", "11.2.200.2/24", "11.2.201.2/24", "11.2.202.2/24", "11.2.203.2/24", "11.2.204.2/24", "11.2.205.2/24", "11.2.206.2/24", "11.2.207.2/24", "11.2.208.2/24", "11.2.209.2/24", "11.2.210.2/24", "11.2.211.2/24", "11.2.212.2/24", "11.2.213.2/24", "11.2.214.2/24", "11.2.215.2/24", "11.2.216.2/24", "11.2.217.2/24", "11.2.218.2/24", "11.2.219.2/24", "11.2.220.2/24", "11.2.221.2/24", 
"11.2.222.2/24", "11.2.223.2/24", "11.2.224.2/24", "11.2.225.2/24", "11.2.226.2/24", "11.2.227.2/24", "11.2.228.2/24", "11.2.229.2/24", "11.2.230.2/24", "11.2.231.2/24", "11.2.232.2/24", "11.2.233.2/24", "11.2.234.2/24", "11.2.235.2/24", "11.2.236.2/24", "11.2.237.2/24", "11.2.238.2/24", "11.2.239.2/24", "11.2.240.2/24", "11.2.241.2/24", "11.2.242.2/24", "11.2.243.2/24", "11.2.244.2/24", "11.2.245.2/24", "11.2.246.2/24", "11.2.247.2/24", "11.2.248.2/24", "11.2.249.2/24", "11.2.250.2/24", "11.2.251.2/24", "11.2.252.2/24", "11.2.253.2/24", "11.2.254.2/24", "11.2.255.2/24", "11.3.0.2/24", "11.3.1.2/24", "11.3.2.2/24", "11.3.3.2/24", "11.3.4.2/24", "11.3.5.2/24", "11.3.6.2/24", "11.3.7.2/24", "11.3.8.2/24", "11.3.9.2/24", "11.3.10.2/24", "11.3.11.2/24", "11.3.12.2/24", "11.3.13.2/24", "11.3.14.2/24", "11.3.15.2/24", "11.3.16.2/24", "11.3.17.2/24", "11.3.18.2/24", "11.3.19.2/24", "11.3.20.2/24", "11.3.21.2/24", "11.3.22.2/24", "11.3.23.2/24", "11.3.24.2/24", "11.3.25.2/24", "11.3.26.2/24", "11.3.27.2/24", "11.3.28.2/24", "11.3.29.2/24", "11.3.30.2/24", "11.3.31.2/24", "11.3.32.2/24", "11.3.33.2/24", "11.3.34.2/24", "11.3.35.2/24", "11.3.36.2/24", "11.3.37.2/24", "11.3.38.2/24", "11.3.39.2/24", "11.3.40.2/24", "11.3.41.2/24", "11.3.42.2/24", "11.3.43.2/24", "11.3.44.2/24", "11.3.45.2/24", "11.3.46.2/24", "11.3.47.2/24", "11.3.48.2/24", "11.3.49.2/24", "11.3.50.2/24", "11.3.51.2/24", "11.3.52.2/24", "11.3.53.2/24", "11.3.54.2/24", "11.3.55.2/24", "11.3.56.2/24", "11.3.57.2/24", "11.3.58.2/24", "11.3.59.2/24", "11.3.60.2/24", "11.3.61.2/24", "11.3.62.2/24", "11.3.63.2/24", "11.3.64.2/24", "11.3.65.2/24", "11.3.66.2/24", "11.3.67.2/24", "11.3.68.2/24", "11.3.69.2/24", "11.3.70.2/24", "11.3.71.2/24", "11.3.72.2/24", "11.3.73.2/24", "11.3.74.2/24", "11.3.75.2/24", "11.3.76.2/24", "11.3.77.2/24", "11.3.78.2/24", "11.3.79.2/24", "11.3.80.2/24", "11.3.81.2/24", "11.3.82.2/24", "11.3.83.2/24", "11.3.84.2/24", "11.3.85.2/24", "11.3.86.2/24", "11.3.87.2/24", "11.3.88.2/24", 
"11.3.89.2/24", "11.3.90.2/24", "11.3.91.2/24", "11.3.92.2/24", "11.3.93.2/24", "11.3.94.2/24", "11.3.95.2/24", "11.3.96.2/24", "11.3.97.2/24", "11.3.98.2/24", "11.3.99.2/24", "11.3.100.2/24", "11.3.101.2/24", "11.3.102.2/24", "11.3.103.2/24", "11.3.104.2/24", "11.3.105.2/24", "11.3.106.2/24", "11.3.107.2/24", "11.3.108.2/24", "11.3.109.2/24", "11.3.110.2/24", "11.3.111.2/24", "11.3.112.2/24", "11.3.113.2/24", "11.3.114.2/24", "11.3.115.2/24", "11.3.116.2/24", "11.3.117.2/24", "11.3.118.2/24", "11.3.119.2/24", "11.3.120.2/24", "11.3.121.2/24", "11.3.122.2/24", "11.3.123.2/24", "11.3.124.2/24", "11.3.125.2/24", "11.3.126.2/24", "11.3.127.2/24", "11.3.128.2/24", "11.3.129.2/24", "11.3.130.2/24", "11.3.131.2/24", "11.3.132.2/24", "11.3.133.2/24", "11.3.134.2/24", "11.3.135.2/24", "11.3.136.2/24", "11.3.137.2/24", "11.3.138.2/24", "11.3.139.2/24", "11.3.140.2/24", "11.3.141.2/24", "11.3.142.2/24", "11.3.143.2/24", "11.3.144.2/24", "11.3.145.2/24", "11.3.146.2/24", "11.3.147.2/24", "11.3.148.2/24", "11.3.149.2/24", "11.3.150.2/24", "11.3.151.2/24", "11.3.152.2/24", "11.3.153.2/24", "11.3.154.2/24", "11.3.155.2/24", "11.3.156.2/24", "11.3.157.2/24", "11.3.158.2/24", "11.3.159.2/24", "11.3.160.2/24", "11.3.161.2/24", "11.3.162.2/24", "11.3.163.2/24", "11.3.164.2/24", "11.3.165.2/24", "11.3.166.2/24", "11.3.167.2/24", "11.3.168.2/24", "11.3.169.2/24", "11.3.170.2/24", "11.3.171.2/24", "11.3.172.2/24", "11.3.173.2/24", "11.3.174.2/24", "11.3.175.2/24", "11.3.176.2/24", "11.3.177.2/24", "11.3.178.2/24", "11.3.179.2/24", "11.3.180.2/24", "11.3.181.2/24", "11.3.182.2/24", "11.3.183.2/24", "11.3.184.2/24", "11.3.185.2/24", "11.3.186.2/24", "11.3.187.2/24", "11.3.188.2/24", "11.3.189.2/24", "11.3.190.2/24", "11.3.191.2/24", "11.3.192.2/24", "11.3.193.2/24", "11.3.194.2/24", "11.3.195.2/24", "11.3.196.2/24", "11.3.197.2/24", "11.3.198.2/24", "11.3.199.2/24", "11.3.200.2/24", "11.3.201.2/24", "11.3.202.2/24", "11.3.203.2/24", "11.3.204.2/24", "11.3.205.2/24", "11.3.206.2/24", 
"11.3.207.2/24", "11.3.208.2/24", "11.3.209.2/24", "11.3.210.2/24", "11.3.211.2/24", "11.3.212.2/24", "11.3.213.2/24", "11.3.214.2/24", "11.3.215.2/24", "11.3.216.2/24", "11.3.217.2/24", "11.3.218.2/24", "11.3.219.2/24", "11.3.220.2/24", "11.3.221.2/24", "11.3.222.2/24", "11.3.223.2/24", "11.3.224.2/24", "11.3.225.2/24", "11.3.226.2/24", "11.3.227.2/24", "11.3.228.2/24", "11.3.229.2/24", "11.3.230.2/24", "11.3.231.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-99"}]}, "of:000002ddebbeb549/98": {"interfaces": [{"ips": ["11.0.97.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-98"}]}, "of:000002ddebbeb549/24": {"interfaces": [{"ips": ["11.0.23.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-24"}]}, "of:000002ddebbeb549/25": {"interfaces": [{"ips": ["11.0.24.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-25"}]}, "of:000002ddebbeb549/26": {"interfaces": [{"ips": ["11.0.25.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-26"}]}, "of:000002ddebbeb549/27": {"interfaces": [{"ips": ["11.0.26.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-27"}]}, "of:000002ddebbeb549/20": {"interfaces": [{"ips": ["11.0.19.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-20"}]}, "of:000002ddebbeb549/21": {"interfaces": [{"ips": ["11.0.20.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-21"}]}, "of:000002ddebbeb549/22": {"interfaces": [{"ips": ["11.0.21.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-22"}]}, "of:000002ddebbeb549/23": {"interfaces": [{"ips": ["11.0.22.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-23"}]}, "of:000002ddebbeb549/28": {"interfaces": [{"ips": ["11.0.27.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-28"}]}, "of:000002ddebbeb549/29": {"interfaces": [{"ips": ["11.0.28.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-29"}]}, "of:000002ddebbeb549/46": {"interfaces": [{"ips": ["11.0.45.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-46"}]}, "of:000002ddebbeb549/47": {"interfaces": [{"ips": ["11.0.46.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-47"}]}, 
"of:000002ddebbeb549/44": {"interfaces": [{"ips": ["11.0.43.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-44"}]}, "of:000002ddebbeb549/45": {"interfaces": [{"ips": ["11.0.44.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-45"}]}, "of:000002ddebbeb549/42": {"interfaces": [{"ips": ["11.0.41.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-42"}]}, "of:000002ddebbeb549/43": {"interfaces": [{"ips": ["11.0.42.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-43"}]}, "of:000002ddebbeb549/40": {"interfaces": [{"ips": ["11.0.39.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-40"}]}, "of:000002ddebbeb549/41": {"interfaces": [{"ips": ["11.0.40.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-41"}]}, "of:000002ddebbeb549/48": {"interfaces": [{"ips": ["11.0.47.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-48"}]}, "of:000002ddebbeb549/49": {"interfaces": [{"ips": ["11.0.48.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-49"}]}, "of:000002ddebbeb549/33": {"interfaces": [{"ips": ["11.0.32.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-33"}]}, "of:000002ddebbeb549/32": {"interfaces": [{"ips": ["11.0.31.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-32"}]}, "of:000002ddebbeb549/31": {"interfaces": [{"ips": ["11.0.30.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-31"}]}, "of:000002ddebbeb549/30": {"interfaces": [{"ips": ["11.0.29.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-30"}]}, "of:000002ddebbeb549/37": {"interfaces": [{"ips": ["11.0.36.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-37"}]}, "of:000002ddebbeb549/36": {"interfaces": [{"ips": ["11.0.35.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-36"}]}, "of:000002ddebbeb549/35": {"interfaces": [{"ips": ["11.0.34.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-35"}]}, "of:000002ddebbeb549/34": {"interfaces": [{"ips": ["11.0.33.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-34"}]}, "of:000002ddebbeb549/39": {"interfaces": [{"ips": ["11.0.38.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-39"}]}, "of:000002ddebbeb549/38": 
{"interfaces": [{"ips": ["11.0.37.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-38"}]}, "of:000002ddebbeb549/55": {"interfaces": [{"ips": ["11.0.54.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-55"}]}, "of:000002ddebbeb549/54": {"interfaces": [{"ips": ["11.0.53.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-54"}]}, "of:000002ddebbeb549/57": {"interfaces": [{"ips": ["11.0.56.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-57"}]}, "of:000002ddebbeb549/56": {"interfaces": [{"ips": ["11.0.55.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-56"}]}, "of:000002ddebbeb549/51": {"interfaces": [{"ips": ["11.0.50.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-51"}]}, "of:000002ddebbeb549/50": {"interfaces": [{"ips": ["11.0.49.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-50"}]}, "of:000002ddebbeb549/53": {"interfaces": [{"ips": ["11.0.52.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-53"}]}, "of:000002ddebbeb549/52": {"interfaces": [{"ips": ["11.0.51.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-52"}]}, "of:000002ddebbeb549/59": {"interfaces": [{"ips": ["11.0.58.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-59"}]}, "of:000002ddebbeb549/58": {"interfaces": [{"ips": ["11.0.57.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-58"}]}, "of:000002ddebbeb549/60": {"interfaces": [{"ips": ["11.0.59.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-60"}]}, "of:000002ddebbeb549/61": {"interfaces": [{"ips": ["11.0.60.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-61"}]}, "of:000002ddebbeb549/62": {"interfaces": [{"ips": ["11.0.61.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-62"}]}, "of:000002ddebbeb549/63": {"interfaces": [{"ips": ["11.0.62.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-63"}]}, "of:000002ddebbeb549/64": {"interfaces": [{"ips": ["11.0.63.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-64"}]}, "of:000002ddebbeb549/65": {"interfaces": [{"ips": ["11.0.64.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-65"}]}, "of:000002ddebbeb549/66": {"interfaces": [{"ips": 
["11.0.65.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-66"}]}, "of:000002ddebbeb549/67": {"interfaces": [{"ips": ["11.0.66.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-67"}]}, "of:000002ddebbeb549/68": {"interfaces": [{"ips": ["11.0.67.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-68"}]}, "of:000002ddebbeb549/69": {"interfaces": [{"ips": ["11.0.68.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-69"}]}, "of:000002ddebbeb549/79": {"interfaces": [{"ips": ["11.0.78.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-79"}]}, "of:000002ddebbeb549/78": {"interfaces": [{"ips": ["11.0.77.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-78"}]}, "of:000002ddebbeb549/77": {"interfaces": [{"ips": ["11.0.76.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-77"}]}, "of:000002ddebbeb549/76": {"interfaces": [{"ips": ["11.0.75.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-76"}]}, "of:000002ddebbeb549/75": {"interfaces": [{"ips": ["11.0.74.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-75"}]}, "of:000002ddebbeb549/74": {"interfaces": [{"ips": ["11.0.73.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-74"}]}, "of:000002ddebbeb549/73": {"interfaces": [{"ips": ["11.0.72.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-73"}]}, "of:000002ddebbeb549/72": {"interfaces": [{"ips": ["11.0.71.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-72"}]}, "of:000002ddebbeb549/71": {"interfaces": [{"ips": ["11.0.70.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-71"}]}, "of:000002ddebbeb549/70": {"interfaces": [{"ips": ["11.0.69.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-70"}]}}, "devices": {"of:000002ddebbeb549": {"basic": {"driver": "softrouter"}}}}
\ No newline at end of file
diff --git a/src/test/setup/onos-form-cluster b/src/test/setup/onos-form-cluster
deleted file mode 100755
index 578a443..0000000
--- a/src/test/setup/onos-form-cluster
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-# -----------------------------------------------------------------------------
-# Forms ONOS cluster using REST API of each separate instance.
-# -----------------------------------------------------------------------------
-
-[ $# -lt 2 ] && echo "usage: $(basename $0) ip1 ip2..." && exit 1
-
-# Scan arguments for user/password or other options...
-while getopts u:p: o; do
-    case "$o" in
-        u) user=$OPTARG;;
-        p) password=$OPTARG;;
-    esac
-done
-ONOS_WEB_USER=${ONOS_WEB_USER:-onos} # ONOS WEB User defaults to 'onos'
-ONOS_WEB_PASS=${ONOS_WEB_PASS:-rocks} # ONOS WEB Password defaults to 'rocks'
-user=${user:-$ONOS_WEB_USER}
-password=${password:-$ONOS_WEB_PASS}
-let OPC=$OPTIND-1
-shift $OPC
-
-ip=$1
-shift
-nodes=$*
-
-ipPrefix=${ip%.*}
-
-aux=/tmp/${ipPrefix}.cluster.json
-trap "rm -f $aux" EXIT
-
-echo "{ \"nodes\": [ { \"ip\": \"$ip\" }" > $aux
-for node in $nodes; do
-    echo ", { \"ip\": \"$node\" }" >> $aux
-done
-echo "], \"ipPrefix\": \"$ipPrefix.*\" }" >> $aux
-
-for node in $ip $nodes; do
-    echo "Forming cluster on $node..."
-    curl --user $user:$password -X POST \
-        http://$node:8181/onos/v1/cluster/configuration -d @$aux
-done
diff --git a/src/test/setup/onos-gen-partitions b/src/test/setup/onos-gen-partitions
deleted file mode 100755
index 8221710..0000000
--- a/src/test/setup/onos-gen-partitions
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-  Generate the partitions json file from the $OC* environment variables
-
-  Usage: onos-gen-partitions [output file] [node_ip ...]
-  If output file is not provided, the json is written to stdout.
-"""
-
-from os import environ
-from collections import deque, OrderedDict
-import re
-import json
-import sys
-import hashlib
-
-convert = lambda text: int(text) if text.isdigit() else text.lower()
-alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
-
-def get_OC_vars():
-  vars = []
-  for var in environ:
-    if re.match(r"OC[0-9]+", var):
-      vars.append(var)
-  return sorted(vars, key=alphanum_key)
-
-def get_nodes(ips=None, port=9876):
-  node = lambda k: { 'id': k, 'ip': k, 'port': port }
-  if not ips:
-    ips = [ environ[v] for v in get_OC_vars() ]
-  return [ node(v) for v in ips ]
-
-def generate_partitions(nodes, k):
-  l = deque(nodes)
-  perms = []
-  for i in range(1, len(nodes)+1):
-    part = {
-             'id': i,
-             'members': list(l)[:k]
-           }
-    perms.append(part)
-    l.rotate(-1)
-  return perms
-
-if __name__ == '__main__':
-  nodes = get_nodes(sys.argv[2:])
-  partitions = generate_partitions([v.get('id') for v in nodes], 3)
-  m = hashlib.sha256()
-  for node in nodes:
-    m.update(node['ip'])
-  name = int(m.hexdigest()[:8], base=16) # 32-bit int based on SHA256 digest
-  data = {
-           'name': name,
-           'nodes': nodes,
-           'partitions': partitions
-         }
-  output = json.dumps(data, indent=4)
-
-  if len(sys.argv) >= 2 and sys.argv[1] != '-':
-    filename = sys.argv[1]
-    with open(filename, 'w') as f:
-      f.write(output)
-  else:
-    print output
diff --git a/src/test/setup/onos_pull.sh b/src/test/setup/onos_pull.sh
deleted file mode 100755
index ebd463b..0000000
--- a/src/test/setup/onos_pull.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-##Use this script as SUDO to pull ONOS safely as it saves/archives repo digest ids.
-##Repo digest ids are saved in $HOME/onos_repo_digest.txt
-tag=${1:-latest}
-repo_digest="$HOME/onos_repo_digest.txt"
-echo "Pulling ONOS $tag"
-digest=`docker pull onosproject/onos:$tag | grep Digest`
-echo "Got $digest for ONOS $tag"
-repo=`echo $digest | cut -d ":" -f2- | sed 's,[[:space:]]*,,'`
-echo "ONOS $tag repo id $repo saved in $repo_digest"
-d=`date +%D`
-echo "$d onosproject/onos:$tag $repo" >>$repo_digest
diff --git a/src/test/setup/prerequisites.sh b/src/test/setup/prerequisites.sh
deleted file mode 100755
index f0bbbfa..0000000
--- a/src/test/setup/prerequisites.sh
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-function usage {
-    echo "usage: ${0#*/} [-h |--help] [--cord] [--venv]"
-    exit 1
-}
-
-on_cord=0
-venv=0
-optspec=":h-:"
-while getopts "$optspec" optchar; do
-    case "${optchar}" in
-        -)
-            case "${OPTARG}" in
-                cord)
-                    on_cord=1
-                    ;;
-                venv)
-                    venv=1
-                    ;;
-                help)
-                    usage
-                    ;;
-                *)
-                    echo "Unknown option --${OPTARG}"
-                    usage
-                    ;;
-            esac
-            ;;
-        h)
-            usage
-            ;;
-        *)
-            usage
-            ;;
-    esac
-done
-
-shift $((OPTIND-1))
-if [ $# -gt 0 ]; then
-    usage
-fi
-
-apt-get update
-release=$(lsb_release -cs)
-#install docker only if not installed already. On cord, its mostly installed.
-if $(which docker 2>&1 >/dev/null); then
-    on_cord=1
-else
-    on_cord=0
-fi
-if [ $on_cord -eq 0 ]; then
-    apt-get -y install apt-transport-https ca-certificates
-    apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-    if [ ! -f /etc/apt/sources.list.d/docker.list ]; then
-        echo deb https://apt.dockerproject.org/repo ubuntu-$release main |  tee /etc/apt/sources.list.d/docker.list
-    fi
-    apt-get update
-    apt-get purge lxc-docker || true
-    apt-get -y install linux-image-extra-$(uname -r)
-    apt-get -y install apparmor
-    echo "Installing Docker"
-    apt-get -y install docker-engine
-    service docker restart
-    sleep 5
-    echo "Verifying Docker installation"
-    docker run --rm hello-world || exit 127
-    docker rmi hello-world
-    echo "Pulling ONOS latest"
-    docker pull onosproject/onos:latest || exit 127
-else
-    echo "Skipping installation of Docker and ONOS"
-fi
-
-apt-get -y install openvswitch-common openvswitch-switch
-apt-get -y install wget git python python-dev python-pip python-setuptools python-scapy python-pexpect python-maas-client tcpdump arping libssl-dev libffi-dev realpath python-virtualenv
-
-setup_path=$(dirname $(realpath $0))
-if [ $venv -eq 1 ]; then
-    echo "Making a virtual cord-tester pip installation environment"
-    mkdir -p $setup_path/venv
-    virtualenv $setup_path/venv
-    echo "Installing cord-tester pip packages on the virtual environment"
-    $setup_path/venv/bin/pip install -U distribute
-    $setup_path/venv/bin/pip install -U -r $setup_path/requirements.txt
-else
-    echo "Installing cord-tester pip packages on the host"
-    pip install -U distribute
-    pip install -U -r $setup_path/requirements.txt
-fi
-
-( cd /tmp && git clone https://github.com/jpetazzo/pipework.git && cp -v pipework/pipework /usr/bin && rm -rf pipework )
-
-install_ovs() {
-    mkdir -p /root/ovs
-    wget http://openvswitch.org/releases/openvswitch-2.5.0.tar.gz -O /root/ovs/openvswitch-2.5.0.tar.gz && \
-    ( cd /root/ovs && tar zxpvf openvswitch-2.5.0.tar.gz && \
-      cd openvswitch-2.5.0 && \
-      ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install
-    )
-}
-
-ovs_install=0
-
-if [ -f /usr/bin/ovs-vsctl ] || [ -f /usr/local/bin/ovs-vsctl ]; then
-    ##find the version. Install if ovs version less than 2.5
-    version=`sudo ovs-vsctl --version | head -1  | awk '/[1-9].[0-9].[0-9]/ {print $NF}'`
-    major=$(echo $version | cut -d "." -f1)
-    minor=$(echo $version | cut -d "." -f2)
-    if [ $major -le 2 ]; then
-        if [ $major -lt 2 ]; then
-            ovs_install=1
-        else
-            if [ $minor -lt 5 ]; then
-                ovs_install=1
-            fi
-        fi
-    fi
-else
-    ovs_install=1
-fi
-
-if [ $ovs_install -eq 1 ]; then
-    echo "Installing OVS 2.5.0"
-    service openvswitch-switch stop
-    install_ovs
-fi
-
-test_images=(cordtest/radius:candidate cordtest/quagga:candidate cordtest/nose:candidate)
-for img in ${test_images[*]}; do
-    echo "Pulling cord-tester image $img"
-    docker pull $img 2>/dev/null
-done
diff --git a/src/test/setup/qct_fabric_test_netcfg.json b/src/test/setup/qct_fabric_test_netcfg.json
deleted file mode 100644
index b839cc0..0000000
--- a/src/test/setup/qct_fabric_test_netcfg.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
-    "devices": {
-        "of:0000480fcfaeee2a": {
-            "segmentrouting": {
-                "name": "device-480fcfaeee2a",
-                "ipv4NodeSid": 100,
-                "ipv4Loopback": "10.6.0.103",
-                "routerMac": "48:0f:cf:ae:ee:2a",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfaede26": {
-            "segmentrouting": {
-                "name": "device-480fcfaede26",
-                "ipv4NodeSid": 101,
-                "ipv4Loopback": "10.6.0.101",
-                "routerMac": "48:0f:cf:ae:de:26",
-                "isEdgeRouter": true,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfae9d58": {
-            "segmentrouting": {
-                "name": "device-480fcfae9d58",
-                "ipv4NodeSid": 102,
-                "ipv4Loopback": "10.6.0.104",
-                "routerMac": "48:0f:cf:ae:9d:58",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        },
-        "of:0000480fcfaecc24": {
-            "segmentrouting": {
-                "name": "device-480fcfaecc24",
-                "ipv4NodeSid": 103,
-                "ipv4Loopback": "10.6.0.102",
-                "routerMac": "48:0f:cf:ae:cc:24",
-                "isEdgeRouter": false,
-                "adjacencySids": []
-            }
-        }
-    },
-    "ports": {
-        "of:0000480fcfaeee2a/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.2.254/24" ],
-                    "vlan-untagged" : 2
-                }
-            ]
-        },
-        "of:0000480fcfaede26/1": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.1.254/24" ],
-                    "vlan-untagged" : 1
-                }
-            ]
-        },
-        "of:0000480fcfaede26/2": {
-            "interfaces": [
-                {
-                    "ips": [ "10.6.1.254/24" ],
-                    "vlan-untagged" : 1
-                }
-            ]
-        }
-    },
-    "apps" : {
-        "org.onosproject.segmentrouting" : {
-            "segmentrouting" : {
-                "vRouterMacs" : [ "a4:23:05:06:01:01" ]
-            },
-            "xconnect": {
-              "of:0000480fcfaede26": [{
-                "vlan": 333,
-                "ports": [1, 2],
-                "name": "vsg-1"
-                },
-                {
-                "vlan": 555,
-                "ports": [1, 2],
-                "name": "vsg-2"},
-                {
-                "vlan": 666,
-                "ports": [1, 2],
-                "name": "vsg-3"}]
-           }
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/test/setup/quagga-config/bgpd.conf b/src/test/setup/quagga-config/bgpd.conf
deleted file mode 100644
index e543ff4..0000000
--- a/src/test/setup/quagga-config/bgpd.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-hostname bgpd
-password zebra
-router bgp 1000
-bgp router-id 10.10.0.1
-neighbor 10.10.0.19 remote-as 1019
-neighbor 10.10.0.19 advertisement-interval 1
-neighbor 10.10.0.19 route-server-client
-neighbor 10.10.0.19 timers 30 90
\ No newline at end of file
diff --git a/src/test/setup/quagga-config/start.sh b/src/test/setup/quagga-config/start.sh
deleted file mode 100755
index 3adabf4..0000000
--- a/src/test/setup/quagga-config/start.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-ulimit -n 65536
-ip a add 10.10.0.3/16 dev eth1
-#bgpd -u root -f /root/config/bgpd.conf &
-conf_file=${1:-/root/config/testrib.conf}
-base_conf=$(basename $conf_file)
-base_conf=${base_conf%%.conf}
-if [[ $base_conf == bgpd* ]]; then
-    /usr/local/sbin/bgpd -u root -f $conf_file
-else
-    /usr/local/sbin/zebra -u root -f $conf_file
-fi
diff --git a/src/test/setup/quagga-config/stop.sh b/src/test/setup/quagga-config/stop.sh
deleted file mode 100755
index ab11808..0000000
--- a/src/test/setup/quagga-config/stop.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-pkill -9 zebra
diff --git a/src/test/setup/quagga-config/testrib.conf b/src/test/setup/quagga-config/testrib.conf
deleted file mode 100644
index af2f213..0000000
--- a/src/test/setup/quagga-config/testrib.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-!
-! Zebra configuration saved from vty
-!   2007/04/01 17:46:48
-!
-password zebra
-log stdout
-service advanced-vty
-!
-debug zebra rib
-debug zebra kernel
-debug zebra fpm
-!
-!interface eth1
-! ip address 10.10.0.3/16
-
-! statics that should be subsumed by connected routes, according to interface
-! state
-line vty
- exec-timeout 0 0
-!
diff --git a/src/test/setup/radius-config/README.opensslcert b/src/test/setup/radius-config/README.opensslcert
deleted file mode 100644
index aa600bf..0000000
--- a/src/test/setup/radius-config/README.opensslcert
+++ /dev/null
@@ -1,30 +0,0 @@
-To update expired certificates,
-use the current working certs_2 directory and copy it as certs_4
-cp -rv certs_2 certs_4
-cd certs_4
-Update ca.cnf,server.cnf,client.cnf default_days field to update certificate expiry.
-Then type:
-make clean
-make
-to create the new certificates.
-
-Now decrypt the openssl rsa keys for:
-client.key, server.key and ca.key
-
-openssl rsa -in ca.key -out ca.key.decrypted
-openssl rsa -in client.key -out client.key.decrypted
-openssl rsa -in server.key -out server.key.decrypted
-
-passphrase for all 3 is whatever
-
-Next step is to update client.pem and server.pem,
-BEGIN ENCRYPTED KEY SECTIONS
-with the decrypted contents of client.key.decrypted and server.key.decrypted respectively.
-
-Then rename the decrypted files back to overwrite the encrypted key files.
-mv ca.key.decrypted ca.key
-mv client.key.decrypted client.key
-mv server.key.decrypted server.key
-
-Now update the test code in cord-tester/src/test/utils/EapTLS.py with the contents of client.crt and client.key.
-If you want, you can also populate the INVALID cert key field in cord-tester/src/test/tls/tlsTest.py with the contents of ca.pem (optional)
diff --git a/src/test/setup/radius-config/db/radius.sqlite3 b/src/test/setup/radius-config/db/radius.sqlite3
deleted file mode 100644
index 19a96c7..0000000
--- a/src/test/setup/radius-config/db/radius.sqlite3
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/README.rst b/src/test/setup/radius-config/freeradius/README.rst
deleted file mode 100644
index 590c023..0000000
--- a/src/test/setup/radius-config/freeradius/README.rst
+++ /dev/null
@@ -1,657 +0,0 @@
-Upgrading to Version 3.0
-========================
-
-.. contents:: Sections
-   :depth: 2
-
-.. important:: 
-   The configuration for 3.0 is *largely* compatible with the 2.x.x
-   configuration.  However, it is NOT possible to simply use the 2.x.x
-   configuration as-is.  Instead, you should re-create it.
-
-Security
---------
-
-A number of configuration items have moved into the "security"
-subsection of radiusd.conf.  If you use these, you should move them.
-Otherwise, they can be ignored.
-
-The list of moved options is::
-
-  chroot
-  user
-  group
-  allow_core_dumps
-  reject_delay
-  status_server
-
-These entries should be moved from "radiusd.conf" to the "security"
-subsection of that file.
-
-Naming
-------
-
-Many names used by configuration items were inconsistent in earlier
-versions of the server.  These names have been unified in version 3.0.
-
-If a file is being referenced or created the config item ``filename``
-is used.
-
-If a file is being created, the initial permissions are set by the
-``permissions`` config item.
-
-If a directory hierarchy needs to be created, the permissions are set
-by ``dir_permissions``.
-
-If an external host is referenced in the context of a module the
-``server`` config item is used.
-
-Unless the config item is a well recognised portmanteau
-(as ``filename`` is for example), it must be written as multiple
-distinct words separated by underscores ``_``.
-
-The configuration items ``file``, ``script_file``, ``module``,
-``detail``, ``detailfile``, ``attrsfile``, ``perm``, ``dirperm``,
-``detailperm``, and ``hostname`` are deprecated. As well as any false
-portmanteaus, and configuration items that used hyphens as word
-delimiters.  e.g. ``foo-bar`` has been changed to ``foo_bar``.  Please
-update your module configuration to use the new syntax.
-
-In most cases the server will tell you the replacement config item to
-use.  As always, run the server in debugging mode to see these
-messages.
-
-Modules Directory
------------------
-
-As of version 3.0, the ``modules/`` directory no longer exists.
-
-Instead, all "example" modules have been put into the
-``mods-available/`` directory.  Modules which can be loaded by the
-server are placed in the ``mods-enabled/`` directory.  All of the
-modules in that directory will be loaded.  This means that the
-``instantiate`` section of radiusd.conf is less important.  The only
-reason to list a module in the ``instantiate`` section is to force
-ordering when the modules are loaded.
-
-Modules can be enabled by creating a soft link.  For module ``foo``, do::
-
-  $ cd raddb
-  $ ln -s mods-available/foo mods-enabled/foo
-
-To create "local" versions of the modules, we suggest copying the file
-instead.  This leaves the original file (with documentation) in the
-``mods-available/`` directory.  Local changes should go into the
-``mods-enabled/`` directory.
-
-Module-specific configuration files are now in the ``mods-config/``
-directory.  This change allows for better organization, and means that
-there are fewer files in the main ``raddb`` directory.  See
-``mods-config/README.rst`` for more details.
-
-Changed Modules
----------------
-
-The following modules have been changed.
-
-
-rlm_sql
-~~~~~~~
-
-The SQL configuration has been moved from ``sql.conf`` to
-``mods-available/sql``.  The ``sqlippool.conf`` file has also been
-moved to ``mods-available/sqlippool``.
-
-The SQL module configuration has been changed.  The old connection
-pool options are no longer accepted::
-
-  num_sql_socks
-  connect_failure_retry_delay
-  lifetime
-  max_queries
-
-Instead, a connection pool configuration is used.  This configuration
-contains all of the functionality of the previous configuration, but
-in a more generic form.  It also is used in multiple modules, meaning
-that there are fewer different configuration items.  The mapping
-between the configuration items is::
-
-  num_sql_socks			-> pool { max }
-  connect_failure_retry_delay	-> pool { retry_delay }
-  lifetime			-> pool { lifetime }
-  max_queries			-> pool { uses }
-
-The pool configuration adds a number of new configuration options,
-which allow the administrator to better control how FreeRADIUS uses
-SQL connection pools.
-
-The following parameters have been changed::
-
-  trace				-> removed
-  tracefile			-> logfile
-
-The logfile is intended to log SQL queries performed.  If you need to
-debug the server, use debugging mode.  If ``logfile`` is set, then
-*all* SQL queries will go to ``logfile``.
-
-You can now use a NULL SQL database::
-
-  driver = rlm_sql_null
-
-This is an empty driver which will always return "success".  It is
-intended to be used to replace the ``sql_log`` module, and to work in
-conjunction with the ``radsqlrelay`` program.  Simply take your normal
-configuration for raddb/mods-enabled/sql, and set::
-
-  driver = rlm_sql_null
-  ...
-  logfile = ${radacctdir}/sql.log
-
-All of the SQL queries will be logged to that file.  The connection
-pool does not need to be configured for the ``null`` SQL driver.  It
-can be left as-is, or deleted from the SQL configuration file.
-
-rlm_sql_sybase
-~~~~~~~~~~~~~~
-
-The ``rlm_sql_sybase`` module has been renamed to ``rlm_sql_freetds``
-and the old ``rlm_sql_freetds`` module has been removed.
-
-``rlm_sql_sybase`` used the newer ct-lib API, and ``rlm_sql_freetds``
-used an older API and was incomplete.
-
-The new ``rlm_sql_freetds`` module now also supports database
-selection on connection startup so ``use`` statements no longer
-have to be included in queries.
-
-sql/dialup.conf
-~~~~~~~~~~~~~~~
-
-Queries for post-auth and accounting calls have been re-arranged.  The
-SQL module will now expand the 'reference' configuration item in the
-appropriate sub-section, and resolve this to a configuration
-item. This behaviour is similar to rlm_linelog.  This dynamic
-expansion allows for a dynamic mapping between accounting types and
-SQL queries.  Previously, the mapping was fixed.  Any "new" accounting
-type was ignored by the module.  Now, support for any accounting type
-can be added by just adding a new target, as below.
-
-Queries from v2.x.x may be manually copied to the new v3.0
-``dialup.conf`` file (``raddb/sql/main/<dialect>/queries.conf``).
-When doing this you may also need to update references to the
-accounting tables, as their definitions will now be outside of
-the subsection containing the query.
-
-The mapping from old "fixed" query to new "dynamic" query is as follows::
-
-  accounting_onoff_query		-> accounting.type.accounting-on.query
-  accounting_update_query		-> accounting.type.interim-update.query
-  accounting_update_query_alt		+> accounting.type.interim-update.query
-  accounting_start_query		-> accounting.type.start.query
-  accounting_start_query_alt		+> accounting.type.start.query
-  accounting_stop_query			-> accounting.type.stop.query
-  accounting_stop_query_alt		+> accounting.type.stop.query
-  postauth_query			-> post-auth.query
-
-Alternatively a 2.x.x config may be patched to work with the
-3.0 module by adding the following::
-
-  accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-	type {
-		accounting-on {
-			query = "${....accounting_onoff_query}"
-		}
-		accounting-off {
-			query = "${....accounting_onoff_query}"
-		}
-		start {
-			query = "${....accounting_start_query}"
-			query = "${....accounting_start_query_alt}"
-		}
-		interim-update {
-			query = "${....accounting_update_query}"
-			query = "${....accounting_update_query_alt}"
-		}
-		stop {
-			query = "${....accounting_stop_query}"
-			query = "${....accounting_stop_query_alt}"
-		}
-	}
-  }
-
-  post-auth {
-	query = "${..postauth_query}"
-  }
-
-In general, it is safer to migrate the configuration rather than
-trying to "patch" it, to make it look like a v2 configuration.
-
-Note that the sub-sections holding the queries are labelled
-``accounting-on``, and not ``accounting_on``.  The reason is that the
-names of these sections are taken directly from the
-``Accounting-Request`` packet, and the ``Acct-Status-Type`` field.
-The ``sql`` module looks at the value of that field, and then looks
-for a section of that name, in order to find the query to use.
-
-That process means that the server can be extended to support any new
-value of ``Acct-Status-Type``, simply by adding a named sub-section,
-and a query.  This behavior is preferable to that of v2, which had
-hard-coded queries for certain ``Acct-Status-Type`` values, and was
-ignored all other values.
-
-rlm_ldap
-~~~~~~~~
-
-The LDAP module configuration has been substantially changed.  Please
-read ``raddb/mods-available/ldap``.  It now uses a connection pool,
-just like the SQL module.
-
-Many of the configuration items remain the same, but they have been
-moved into subsections.  This change is largely cosmetic, but it makes
-the configuration clearer.  Instead of having a large set of random
-configuration items, they are now organized into logical groups.
-
-You will need to read your old LDAP configuration, and migrate it
-manually to the new configuration.  Simply copying the old
-configuration WILL NOT WORK.
-
-Users upgrading from 2.x.x who used to call the ldap module in
-``post-auth`` should now set ``edir_autz = yes``, and remove the ``ldap``
-module from the ``post-auth`` section.
-
-rlm_ldap and LDAP-Group
-~~~~~~~~~~~~~~~~~~~~~~~
-
-In 2.x.x the registration of the ``LDAP-Group`` pair comparison was done
-by the last instance of rlm_ldap to be instantiated. In 3.0 this has
-changed so that only the default ``ldap {}`` instance registers
-``LDAP-Group``.
-
-If ``<instance>-LDAP-Group`` is already used throughout your configuration
-no changes will be needed.
-
-rlm_ldap authentication
-~~~~~~~~~~~~~~~~~~~~~~~
-
-In 2.x.x the LDAP module had a ``set_auth_type`` configuration item,
-which forced ``Auth-Type := ldap``. This was removed in 3.x.x as it
-often did not work, and was not consistent with the rest of the
-server.  We generally recommend that LDAP should be used as a
-database, and that FreeRADIUS should do authentication.
-
-The only reason to use ``Auth-Type := ldap`` is when the LDAP server
-will not supply the "known good" password to FreeRADIUS, *and* where
-the Access-Request contains User-Password.  This situation happens
-only for Active Directory.  If you think you need to force ``Auth-Type
-:= ldap`` in other situations, you are very likely to be wrong.
-
-The following is an example of what should be inserted into the
-``authorize {}`` and ``authenticate {}`` sections of the relevant
-virtual-servers, to get functionality equivalent to v2.x::
-
-  authorize {
-    ...
-    ldap
-    if ((ok || updated) && User-Password) {
-      update control {
-	Auth-Type := ldap
-      }
-    }
-    ...
-  }
-  
-  authenticate {
-    ...
-    Auth-Type ldap {
-      ldap   
-    }
-    ...
-  }
-
-rlm_eap
-~~~~~~~
-
-The EAP configuration has been moved from ``eap.conf`` to
-``mods-available/eap``.  A new ``pwd`` subsection has been added for
-EAP-PWD.
-
-rlm_expiration & rlm_logintime
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The rlm_expiration and rlm_logintime modules no longer add a ``Reply-Message``,
-the same behaviour can be achieved checking the return code of the module and
-adding the ``Reply-Message`` with unlang::
-
-  expiration
-  if (userlock) {
-    update reply {
-      Reply-Message := "Your account has expired"
-    }
-  }
-
-rlm_unix
-~~~~~~~~
-
-The ``unix`` module does not have an ``authenticate`` section.  So you
-cannot set ``Auth-Type := System``.  The ``unix`` module has also been
-deleted from the examples in ``sites-available/``.  Listing it there
-has been deprecated for many years.
-
-The PAP module can do crypt authentication.  It should be used instead
-of Unix authentication.
-
-The Unix module still can pull the passwords from ``/etc/passwd``, or
-``/etc/shadow``.  This is done by listing it in the ``authorize``
-section, as is done in the examples in ``sites-available/``.  However,
-some systems using NIS or NSS will not supply passwords to the
-``unix`` module.  For those systems, we recommend putting users and
-passwords into a database, instead of relying on ``/etc/passwd``.
-
-New Modules
------------
-
-rlm_date
-~~~~~~~~
-
-Instances of rlm_date register an xlat method which can translate
-integer and date values to an arbitrarily formatted date time
-string, or an arbitrarily formated time string to an integer, 
-depending on the attribute type passed.
-
-rlm_rest
-~~~~~~~~
-
-The ``rest`` module is used to translate RADIUS requests into 
-RESTfull HTTP requests. Currently supported body types are JSON
-and POST.
-
-rlm_unpack
-~~~~~~~~~~
-
-The ``unpack`` module is used to turn data buried inside of binary
-attributes.  e.g. if we have ``Class = 0x00000001020304`` then::
-
-  Tmp-Integer-0 := "%{unpack:&Class 4 short}"
-
-will unpack octets 4 and 5 as a "short", which has value 0x0304.
-All integers are assumed to be in network byte order.
-
-rlm_yubikey
-~~~~~~~~~~~
-
-The ``yubikey`` module can be used to forward yubikey OTP token
-values to a Yubico validation server, or decrypt the token 
-using a PSK.
-
-Deleted Modules
----------------
-
-The following modules have been deleted, and are no longer supported
-in Version 3.  If you are using one of these modules, your
-configuration can probably be changed to not need it.  Otherwise email
-the freeradius-devel list, and ask about the module.
-
-rlm_acct_unique
-~~~~~~~~~~~~~~~
-
-This module has been replaced by the "acct_unique" policy.  See
-raddb/policy.d/accounting.
-
-The method for calculating the value of acct_unique has changed.
-However, as this method was configurable, this change should not
-matter.  The only issue is in having a v2 and v3 server writing to the
-same database at the same time.  They will calculate different values
-for Acct-Unique-Id.
-
-rlm_acctlog
-~~~~~~~~~~~
-
-You should use rlm_linelog instead.  That module has a superset of the
-acctlog functionality.
-
-rlm_attr_rewrite
-~~~~~~~~~~~~~~~~
-
-The attr_rewrite module looked for an attribute, and then re-wrote it,
-or created a new attribute.  All of that can be done in "unlang".
-
-A sample configuration in "unlang" is::
-
-  if (request:Calling-Station-Id) {
-    update request {
-      Calling-Station-Id := "...."
-    }
-  }
-
-We suggest updating all uses of attr_rewrite to use unlang instead.
-
-rlm_checkval
-~~~~~~~~~~~~
-
-The checkval module compared two attributes.  All of that can be done in "unlang"::
-
-  if (&request:Calling-Station-Id == &control:Calling-Station-Id) {
-    ok
-  }
-
-We suggest updating all uses of checkval to use unlang instead.
-
-rlm_dbm
-~~~~~~~
-
-No one seems to use it.  There is no sample configuration for it.
-There is no speed advantage to using it over the "files" module.
-Modern systems are fast enough that 10K entries can be read from the
-"users" file in about 10ms.  If you need more users than that, use a
-real database such as SQL.
-
-rlm_fastusers
-~~~~~~~~~~~~~
-
-No one seems to use it.  It has been deprecated since Version 2.0.0.
-The "files" module was rewritten so that the "fastusers" module was no
-longer necessary.
-
-rlm_policy
-~~~~~~~~~~
-
-No one seems to use it.  Almost all of its functionality is available
-via "unlang".
-
-rlm_sim_files
-~~~~~~~~~~~~~
-
-The rlm_sim_files module has been deleted.  It was never marked "stable",
-and was never used in a production environment.  There are better ways
-to test EAP.
-
-If you want similar functionality, see rlm_passwd.  It can read CSV
-files, and create attributes from them.
-
-rlm_sql_log
-~~~~~~~~~~~
-
-This has been replaced with the "null" sql driver.  See
-raddb/mods-available/sql for an example configuration.
-
-The main SQL module has more functionality than rlm_sql_log, and
-results in less code in the server.
-
-Other Functionality
--------------------
-
-The following is a list of new / changed functionality.
-
-RadSec
-~~~~~~
-
-RadSec (or RADIUS over TLS) is now supported.  RADIUS over bare TCP
-is also supported, but is recommended only for secure networks.
-
-See ``sites-available/tls`` for complete details on using TLS.  The server
-can both receive incoming TLS connections, and also originate outgoing
-TLS connections.
-
-The TLS configuration is taken from the old EAP-TLS configuration.  It
-is largely identical to the old EAP-TLS configuration, so it should be
-simple to use and configure.  It re-uses much of the EAP-TLS code,
-so it is well-tested and reliable.
-
-Once RadSec is enabled, normal debugging mode will not work.  This is
-because the TLS code requires threading to work properly.  Instead of doing::
-
-  $ radiusd -X
-
-you will need to do::
-
-  $ radiusd -fxx -l stdout
-
-That's the price to pay for using RadSec.  This limitation may be
-lifted in a future version of the server.
-
-
-PAP and User-Password
-~~~~~~~~~~~~~~~~~~~~~
-
-From version 3.0 onwards the server no longer supports authenticating
-against a cleartext password in the 'User-Password' attribute. Any
-occurences of this (for instance, in the users file) should now be changed
-to 'Cleartext-Password' instead.
-
-e.g. change entries like this::
-
-  bob User-Password == "hello"
-
-to ones like this::
-
-  bob Cleartext-Password := "hello"
-
-
-If this is not done, authentication will likely fail.  The server will
-also print a helpful message in debugging mode.
-
-If it really is impossible to do this, the following unlang inserted above
-the call to the pap module may be used to copy User-Password to the correct
-attribute::
-
-  if (!control:Cleartext-Password && control:User-Password) {
-    update control {
-      Cleartext-Password := "%{control:User-Password}"
-    }
-  }
-
-However, this should only be seen as a temporary, not permanent, fix.
-It is better to fix your databases to use the correct configuration.
-
-Unlang
-~~~~~~
-
-The unlang policy language is compatible with v2, but has a number of
-new features.  See ``man unlang`` for complete documentation.
-
-ERRORS
-
-Many more errors are caught when the server is starting up.  Syntax
-errors in ``unlang`` are caught, and a helpful error message is
-printed.  The error message points to the exact place where the error
-occurred::
-
-  ./raddb/sites-enabled/default[230]: Parse error in condition
-  ERROR:  if (User-Name ! "bob") {
-  ERROR:                ^ Invalid operator
-
-``update`` sections are more generic.  Instead of doing ``update
-reply``, you can do the following::
-
-  update {
-	  reply:Class := 0x0000
-	  control:Cleartext-Password := "hello"
-  }
-
-This change means that you need fewer ``update`` sections.
-
-COMPARISONS
-
-Attribute comparisons can be done via the ``&`` operator.  When you
-needed to compare two attributes, the old comparison style was::
-
-  if (User-Name == "%{control:Tmp-String-0}") {
-
-This syntax is inefficient, as the ``Tmp-String-0`` attribute would be
-printed to an intermediate string, causing unnecessary work.  You can
-now instead compare the two attributes directly::
-
-  if (&User-Name == &control:Tmp-String-0) {
-
-See ``man unlang`` for more details.
-
-CASTS
-
-Casts are now permitted.  This allows you to force type-specific
-comparisons::
-
-  if (<ipaddr>"%{sql: SELECT...}" == 127.0.0.1) {
-
-This forces the string returned by the SELECT to be treated as an IP
-address, and compare to ``127.0.0.1``.  Previously, the comparison
-would have been done as a simple string comparison.
-
-NETWORKS
-
-IP networks are now supported::
-
-  if (127.0.0.1/32 == 127.0.0.1) {
-
-Will be ``true``.  The various comparison operators can be used to
-check IP network membership::
-
-  if (127/8 > 127.0.0.1) {
-
-Returns ``true``, because ``127.0.0.1`` is within the ``127/8``
-network.  However, the following comparison will return ``false``::
-
-  if (127/8 > 192.168.0.1) {
-
-because ``192.168.0.1`` is outside of the ``127/8`` network.
-
-OPTIMIZATION
-
-As ``unlang`` is now pre-compiled, many compile-time optimizations are
-done.  This means that the debug output may not be exactly the same as
-what is in the configuration files::
-
-  if (0 && (User-Name == "bob')) {
-
-The result will always be ``false``, as the ``if 0`` prevents the
-following ``&& ...`` from being evaluated.
-
-Not only that, but the entire contents of that section will be ignored
-entirely::
-
-  if (0) {
-      this_module_does_not_exist
-      and_this_one_does_not_exist_either
-  }
-
-In v2, that configuration would result in a parse error, as there is
-no module called ``this_module_does_not_exist``.  In v3, that text is
-ignored.  This ability allows you to have dynamic configurations where
-certain parts are used (or not) depending on compile-time configuration.
-
-Similarly, conditions which always evaluate to ``true`` will be
-optimized away::
-
-  if (1) {
-      files
-  }
-
-That configuration will never show the ``if (1)`` output in debugging mode.
-
-
-Dialup_admin
-------------
-
-The dialip_admin directory has been removed.  No one stepped forward
-to maintain it, and the code had not been changed in many years.
-
diff --git a/src/test/setup/radius-config/freeradius/certs/01.pem b/src/test/setup/radius-config/freeradius/certs/01.pem
deleted file mode 100644
index 246df1b..0000000
--- a/src/test/setup/radius-config/freeradius/certs/01.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 1 (0x1)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:35 2016 GMT
-            Not After : Mar  6 18:53:35 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
-                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
-                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
-                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
-                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
-                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
-                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
-                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
-                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
-                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
-                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
-                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
-                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
-                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
-                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
-                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
-                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
-                    de:a9
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
-         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
-         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
-         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
-         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
-         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
-         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
-         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
-         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
-         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
-         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
-         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
-         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
-         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
-         8b:78:9b:6f
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
-MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
-zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
-NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
-cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
-bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
-NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
-UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
-kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
-tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
-eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
-zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
-ceVGJAlllSIbNYt4m28=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/02.pem b/src/test/setup/radius-config/freeradius/certs/02.pem
deleted file mode 100644
index 1197fec..0000000
--- a/src/test/setup/radius-config/freeradius/certs/02.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 2 (0x2)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:36 2016 GMT
-            Not After : Mar  6 18:53:36 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
-                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
-                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
-                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
-                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
-                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
-                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
-                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
-                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
-                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
-                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
-                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
-                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
-                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
-                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
-                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
-                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
-                    6b:ff
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
-         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
-         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
-         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
-         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
-         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
-         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
-         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
-         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
-         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
-         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
-         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
-         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
-         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
-         c2:15:2d:ca
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/07a45775.0 b/src/test/setup/radius-config/freeradius/certs/07a45775.0
deleted file mode 120000
index 799a1c6..0000000
--- a/src/test/setup/radius-config/freeradius/certs/07a45775.0
+++ /dev/null
@@ -1 +0,0 @@
-client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/19a996e2.0 b/src/test/setup/radius-config/freeradius/certs/19a996e2.0
deleted file mode 120000
index 799a1c6..0000000
--- a/src/test/setup/radius-config/freeradius/certs/19a996e2.0
+++ /dev/null
@@ -1 +0,0 @@
-client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/34e00910.0 b/src/test/setup/radius-config/freeradius/certs/34e00910.0
deleted file mode 120000
index 55f0c91..0000000
--- a/src/test/setup/radius-config/freeradius/certs/34e00910.0
+++ /dev/null
@@ -1 +0,0 @@
-01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/865470fd.0 b/src/test/setup/radius-config/freeradius/certs/865470fd.0
deleted file mode 120000
index e375f5a..0000000
--- a/src/test/setup/radius-config/freeradius/certs/865470fd.0
+++ /dev/null
@@ -1 +0,0 @@
-ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/8fe581ba.0 b/src/test/setup/radius-config/freeradius/certs/8fe581ba.0
deleted file mode 120000
index 55f0c91..0000000
--- a/src/test/setup/radius-config/freeradius/certs/8fe581ba.0
+++ /dev/null
@@ -1 +0,0 @@
-01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/Makefile b/src/test/setup/radius-config/freeradius/certs/Makefile
deleted file mode 100644
index c8f0892..0000000
--- a/src/test/setup/radius-config/freeradius/certs/Makefile
+++ /dev/null
@@ -1,140 +0,0 @@
-######################################################################
-#
-#	Make file to be installed in /etc/raddb/certs to enable
-#	the easy creation of certificates.
-#
-#	See the README file in this directory for more information.
-#
-#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
-#
-######################################################################
-
-DH_KEY_SIZE	= 1024
-
-#
-#  Set the passwords
-#
-PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
-
-USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
-CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
-
-######################################################################
-#
-#  Make the necessary files, but not client certificates.
-#
-######################################################################
-.PHONY: all
-all: index.txt serial dh random server ca client
-
-.PHONY: client
-client: client.pem
-
-.PHONY: ca
-ca: ca.der
-
-.PHONY: server
-server: server.pem server.vrfy
-
-######################################################################
-#
-#  Diffie-Hellman parameters
-#
-######################################################################
-dh:
-	openssl dhparam -out dh $(DH_KEY_SIZE)
-
-######################################################################
-#
-#  Create a new self-signed CA certificate
-#
-######################################################################
-ca.key ca.pem: ca.cnf
-	@[ -f index.txt ] || $(MAKE) index.txt
-	@[ -f serial ] || $(MAKE) serial
-	openssl req -new -x509 -keyout ca.key -out ca.pem \
-		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
-
-ca.der: ca.pem
-	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
-
-######################################################################
-#
-#  Create a new server certificate, signed by the above CA.
-#
-######################################################################
-server.csr server.key: server.cnf
-	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
-
-server.crt: server.csr ca.key ca.pem
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
-
-server.p12: server.crt
-	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-server.pem: server.p12
-	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-.PHONY: server.vrfy
-server.vrfy: ca.pem
-	@openssl verify -CAfile ca.pem server.pem
-
-######################################################################
-#
-#  Create a new client certificate, signed by the the above server
-#  certificate.
-#
-######################################################################
-client.csr client.key: client.cnf
-	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-
-client.crt: client.csr ca.pem ca.key
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-
-client.p12: client.crt
-	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-
-client.pem: client.p12
-	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-	cp client.pem $(USER_NAME).pem
-
-.PHONY: client.vrfy
-client.vrfy: ca.pem client.pem
-	c_rehash .
-	openssl verify -CApath . client.pem
-
-######################################################################
-#
-#  Miscellaneous rules.
-#
-######################################################################
-index.txt:
-	@touch index.txt
-
-serial:
-	@echo '01' > serial
-
-random:
-	@if [ -c /dev/urandom ] ; then \
-		ln -sf /dev/urandom random; \
-	else \
-		date > ./random; \
-	fi
-
-print:
-	openssl x509 -text -in server.crt
-
-printca:
-	openssl x509 -text -in ca.pem
-
-clean:
-	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
-
-#
-#	Make a target that people won't run too often.
-#
-destroycerts:
-	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
-			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs/README b/src/test/setup/radius-config/freeradius/certs/README
deleted file mode 100644
index f7e0591..0000000
--- a/src/test/setup/radius-config/freeradius/certs/README
+++ /dev/null
@@ -1,226 +0,0 @@
-  This directory contains scripts to create the server certificates.
-To make a set of default (i.e. test) certificates, simply type:
-
-$ ./bootstrap
-
-  The "openssl" command will be run against the sample configuration
-files included here, and will make a self-signed certificate authority
-(i.e. root CA), and a server certificate.  This "root CA" should be
-installed on any client machine needing to do EAP-TLS, PEAP, or
-EAP-TTLS.
-
-  The Microsoft "XP Extensions" will be automatically included in the
-server certificate.  Without those extensions Windows clients will
-refuse to authenticate to FreeRADIUS.
-
-  The root CA and the "XP Extensions" file also contain a crlDistributionPoints
-attribute. The latest release of Windows Phone needs this to be present
-for the handset to validate the RADIUS server certificate. The RADIUS
-server must have the URI defined but the CA need not have...however it
-is best practice for a CA to have a revocation URI. Note that whilst
-the Windows Mobile client cannot actually use the CRL when doing 802.1X
-it is recommended that the URI be an actual working URL and contain a
-revocation format file as there may be other OS behaviour at play and
-future OSes that may do something with that URI.
-
-  In general, you should use self-signed certificates for 802.1x (EAP)
-authentication.  When you list root CAs from other organisations in
-the "ca_file", you permit them to masquerade as you, to authenticate
-your users, and to issue client certificates for EAP-TLS.
-
-  If FreeRADIUS was configured to use OpenSSL, then simply starting
-the server in root in debugging mode should also create test
-certificates, i.e.:
-
-$ radiusd -X
-
-  That will cause the EAP-TLS module to run the "bootstrap" script in
-this directory.  The script will be executed only once, the first time
-the server has been installed on a particular machine.  This bootstrap
-script SHOULD be run on installation of any pre-built binary package
-for your OS.  In any case, the script will ensure that it is not run
-twice, and that it does not over-write any existing certificates.
-
-  If you already have CA and server certificates, rename (or delete)
-this directory, and create a new "certs" directory containing your
-certificates.  Note that the "make install" command will NOT
-over-write your existing "raddb/certs" directory, which means that the
-"bootstrap" command will not be run.
-
-
-		NEW INSTALLATIONS OF FREERADIUS
-
-
-  We suggest that new installations use the test certificates for
-initial tests, and then create real certificates to use for normal
-user authentication.  See the instructions below for how to create the
-various certificates.  The old test certificates can be deleted by
-running the following command:
-
-$ rm -f *.pem *.der *.csr *.crt *.key *.p12 serial* index.txt*
-
-  Then, follow the instructions below for creating real certificates.
-
-  Once the final certificates have been created, you can delete the
-"bootstrap" command from this directory, and delete the
-"make_cert_command" configuration from the "tls" sub-section of
-eap.conf.
-
-  If you do not want to enable EAP-TLS, PEAP, or EAP-TTLS, then delete
-the relevant sub-sections from the "eap.conf" file.
-
-
-		MAKING A ROOT CERTIFICATE
-
-
-$ vi ca.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the CA certificate.
-
-  Edit the [certificate_authority] section to have the correct values
-  for your country, state, etc.
-
-$ make ca.pem
-
-  This step creates the CA certificate.
-
-$ make ca.der
-
-  This step creates the DER format of the self-signed certificate,
-  which is can be imported into Windows.
-
-
-		MAKING A SERVER CERTIFICATE
-
-
-$ vi server.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the server certificate.
-
-  Edit the [server] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  different from the commonName for the CA certificate.
-
-$ make server.pem
-
-  This step creates the server certificate.
-
-  If you have an existing certificate authority, and wish to create a
-  certificate signing request for the server certificate, edit
-  server.cnf as above, and type the following command.
-
-$ make server.csr
-
-  You will have to ensure that the certificate contains the XP
-  extensions needed by Microsoft clients.
-
-
-		MAKING A CLIENT CERTIFICATE
-
-
-  Client certificates are used by EAP-TLS, and optionally by EAP-TTLS
-and PEAP.  The following steps outline how to create a client
-certificate that is signed by the server certificate created above.
-You will have to have the password for the server certificate in the
-"input_password" and "output_password" fields of the server.cnf file.
-
-
-$ vi client.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the client certificate.  You will have to give these
-  passwords to the end user who will be using the certificates.
-
-  Edit the [client] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  the User-Name that will be used for logins!
-
-$ make client.pem
-
-  The users certificate will be in "emailAddress.pem",
-  i.e. "user@example.com.pem".
-
-  To create another client certificate, just repeat the steps for
-  making a client certificate, being sure to enter a different login
-  name for "commonName", and a different password.
-
-
-		PERFORMANCE
-
-
-  EAP performance for EAP-TLS, TTLS, and PEAP is dominated by SSL
-  calculations.  That is, a normal system can handle PAP
-  authentication at a rate of 10k packets/s.  However, SSL involves
-  RSA calculations, which are very expensive.  To benchmark your system,
-  do:
-
-$ openssl speed rsa
-
-  or
-
-$ openssl speed rsa2048
-
-  to test 2048 bit keys.
-
-  A 1GHz system will likely do 30 calculations/s.  A 2GHz system may
-  do 50 calculations/s, or more.  That number is also the number of
-  authentications/s that can be done for EAP-TLS (or TTLS, or PEAP).
-
-
-		COMPATIBILITY
-
-The certificates created using this method are known to be compatible
-with ALL operating systems.  Some common issues are:
-
-  - Windows requires certain OIDs in the certificates.  If it doesn't
-    see them, it will stop doing EAP.  The most visible effect is
-    that the client starts EAP, gets a few Access-Challenge packets,
-    and then a little while later re-starts EAP.  If this happens, see
-    the FAQ, and the comments in raddb/eap.conf for how to fix it.
-
-  - Windows requires the root certificates to be on the client PC.
-    If it doesn't have them, you will see the same issue as above.
-
-  - Windows XP post SP2 has a bug where it has problems with
-    certificate chains.  i.e. if the server certificate is an
-    intermediate one, and not a root one, then authentication will
-    silently fail, as above.
-
-  - Some versions of Windows CE cannot handle 4K RSA certificates.
-    They will (again) silently fail, as above.
-
-  - In none of these cases will Windows give the end user any
-    reasonable error message describing what went wrong.  This leads
-    people to blame the RADIUS server.  That blame is misplaced.
-
-  - Certificate chains of more than 64K bytes are known to not work.
-    This is a problem in FreeRADIUS.  However, most clients cannot
-    handle 64K certificate chains.  Most Access Points will shut down
-    the EAP session after about 50 round trips, while 64K certificate
-    chains will take about 60 round trips.  So don't use large
-    certificate chains.  They will only work after everyone upgrade
-    everything in the network.
-
-  - All other operating systems are known to work with EAP and
-    FreeRADIUS.  This includes Linux, *BSD, Mac OS X, Solaris,
-    Symbian, along with all known embedded systems, phones, WiFi
-    devices, etc.
-
-  - Someone needs to ask Microsoft to please stop making life hard for
-    their customers.
-
-
-		SECURITY CONSIDERATIONS
-
-The default certificate configuration files uses MD5 for message
-digests, to maintain compatibility with network equipment that
-supports only this algorithm.
-
-MD5 has known weaknesses and is discouraged in favour of SHA1 (see
-http://www.kb.cert.org/vuls/id/836068 for details). If your network
-equipment supports the SHA1 signature algorithm, we recommend that you
-change the "ca.cnf", "server.cnf", and "client.cnf" files to specify
-the use of SHA1 for the certificates. To do this, change the
-'default_md' entry in those files from 'md5' to 'sha1'.
diff --git a/src/test/setup/radius-config/freeradius/certs/bootstrap b/src/test/setup/radius-config/freeradius/certs/bootstrap
deleted file mode 100755
index 82f93ec..0000000
--- a/src/test/setup/radius-config/freeradius/certs/bootstrap
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/sh
-#
-#  This is a wrapper script to create default certificates when the
-#  server first starts in debugging mode.  Once the certificates have been
-#  created, this file should be deleted.
-#
-#  Ideally, this program should be run as part of the installation of any
-#  binary package.  The installation should also ensure that the permissions
-#  and owners are correct for the files generated by this script.
-#
-#  $Id: c9d939beac8d5bdc21ea1ff9233442f9ab933297 $
-#
-umask 027
-cd `dirname $0`
-
-make -h > /dev/null 2>&1
-
-#
-#  If we have a working "make", then use it.  Otherwise, run the commands
-#  manually.
-#
-if [ "$?" = "0" ]; then
-  make all
-  exit $?
-fi
-
-#
-#  The following commands were created by running "make -n", and edited
-#  to remove the trailing backslash, and to add "exit 1" after the commands.
-#
-#  Don't edit the following text.  Instead, edit the Makefile, and
-#  re-generate these commands.
-#
-if [ ! -f dh ]; then
-  openssl dhparam -out dh 1024 || exit 1
-  if [ -e /dev/urandom ] ; then
-	ln -sf /dev/urandom random
-  else
-	date > ./random;
-  fi
-fi
-
-if [ ! -f server.key ]; then
-  openssl req -new  -out server.csr -keyout server.key -config ./server.cnf || exit 1
-fi
-
-if [ ! -f ca.key ]; then
-  openssl req -new -x509 -keyout ca.key -out ca.pem -days `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'` -config ./ca.cnf || exit 1
-fi
-
-if [ ! -f index.txt ]; then
-  touch index.txt
-fi
-
-if [ ! -f serial ]; then
-  echo '01' > serial
-fi
-
-if [ ! -f server.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf || exit 1
-fi
-
-if [ ! -f server.p12 ]; then
-  openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-fi
-
-if [ ! -f server.pem ]; then
-  openssl pkcs12 -in server.p12 -out server.pem -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-  openssl verify -CAfile ca.pem server.pem || exit 1
-fi
-
-if [ ! -f ca.der ]; then
-  openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der || exit 1
-fi
-
-if [ ! -f client.key ]; then
-  openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-fi
-
-if [ ! -f client.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-fi
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.cnf b/src/test/setup/radius-config/freeradius/certs/ca.cnf
deleted file mode 100644
index 37207e8..0000000
--- a/src/test/setup/radius-config/freeradius/certs/ca.cnf
+++ /dev/null
@@ -1,62 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= certificate_authority
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-x509_extensions		= v3_ca
-
-[certificate_authority]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Certificate Authority"
-
-[v3_ca]
-subjectKeyIdentifier	= hash
-authorityKeyIdentifier	= keyid:always,issuer:always
-basicConstraints	= CA:true
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.der b/src/test/setup/radius-config/freeradius/certs/ca.der
deleted file mode 100644
index a505cfc..0000000
--- a/src/test/setup/radius-config/freeradius/certs/ca.der
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.key b/src/test/setup/radius-config/freeradius/certs/ca.key
deleted file mode 100644
index f6ce685..0000000
--- a/src/test/setup/radius-config/freeradius/certs/ca.key
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQInjXhKnIFIgUCAggA
-MBQGCCqGSIb3DQMHBAh+B3FNG/y9LQSCBMhDZu1kDr0UGMnD8mpB7H319e8GLakT
-8jzPI+bxethA4ZthxY3x5Jxcvyhzy92pR7lCG2Sr8HOVhPpxmp3W5HhbrDhdOj+Q
-qy4Y00G2JCos2wVKTecAOgE5a3i2hDVJDsGxqfwqdohBUwhxVwGDxyzelClN3tNW
-xlj6YuPdUly5tmS1Jo0vtw94QtKk1N0JlNxkTz98vfvgxD4vHqMZugfV2EX2P985
-taRb2bX8VE5rh7CkNhYmYxyv5LACM+7IyM4yvUBfqJG0JPg4UKDVya1dm2mHb7+I
-6C7UcOM7phMZeHNT1gixzSl1UFEtBJaPgmxKIIyBUls7bgKOtNf+BNo+OTWfrnH+
-nvMAoEEPr0GT/fD1tpiR4JaRYXMUHrUt8kGw3Kayr2q9HYJuDeb1uwxK/ml+7aGN
-km9IEV/8Cc0/7TGSQR8jqS+evEy9Sv7tiB2rGnwB6hsbvT+l3jQdv/cX00vMPhRA
-g2KBqifiXRnZnYWlY1KAhZZm0BqJuohYko+xJ2yL2I5q8H7UooN+ND8nsaROOELq
-4FdwJd3MaHEgEnyPL5zFrCcv/R/v1GZNJGt0GT2marwPjLTkGcvDZvbX44a/InyV
-IlYZA/JNmWP6NlABhZIf/mzxnOWPjBwqq2y8Wg2PXpYrumj1nIUoO5B96YYo6lvS
-wlAjIYAxLy8IsUMURDfpvm2CCIW0aGTwO8YSyWnxLBa27MG8fWubjJafm+K4SOf3
-uLiBWlbrkCG7jvNL07/mnkjlP4n05Olb5nrpeTKOz3nklbQmsBhC5/OHZj7ZlUul
-gAR6/U3B0sefMsdqxuls3w8qfgrV1oQGATxvWgYs5zFa/bXBSN1L+brc2q+8ZtgR
-GkFIwnXPWiKB7GIlamER24a1nctR4vL+sYmpmlav+OS6n/jItTCYed+dQ5inC3hX
-4rdGiAjylaTDkW7k4dtIXGUJNGZbIxrpAqNYOVYrCyAEj+HdpNuTUUO2vohq+EM1
-og7SeLhsVg1bG3lYRaqZaXjsof2NAruFJ8aH93DcwoClxFjNJxOd9YAXIA83Uvz8
-D2Bu1/Z41Grq8O7YEnrYbxJP77G9PAgCLt2Uc16O91Lpg1gZ3gESX2BmuR38wbyv
-t5MoC1/oSBV+643yq2ldQRYOMSKl/CLoApywcatdHCIiDC3AEIklueG5jA9Diutl
-ZfK8XSpBEYPQm+eHLdfUOTTnF3SoNPDGbm102nKyvgmGpReFgREYyZSwvg/1YuL/
-m8S+lR+gmP3i9Y4/0UcccI24tO5s0FI4od/4BZ4NW9JsYKxCTj/WJCH4bpmjtmwK
-WI1XSxso1ueVQ7qJBVJyEsMa480nJ5GMKoEfzhqzGzvT1awcz5y/Q/4vIjGZVmR2
-GekRkn9uadPQnIsYGX99A5gPAXP+oCJ9MqLXZPdWLXdm0OybAkD++ryKfi3DNYq2
-TO4hcHLi7lEaIgDcOt+RWTkF0y6yZ3vnY6llvQTRF7fe+6R4YJg0On69+Lt6BoZw
-hmgaaR8YJl++eFWzCJjdJJrCPIiQginbGbpks2Zrz5hGGcQhNwomRX5DFVouePK5
-qhd54Myo2di+Fu0Ls86+nFwnIs9s1+c/2rDWzV1aRfEjnv3OUSLi1saoXjiunMBq
-/L4=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.pem b/src/test/setup/radius-config/freeradius/certs/ca.pem
deleted file mode 100644
index 916cdf9..0000000
--- a/src/test/setup/radius-config/freeradius/certs/ca.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAM6l2jUG56pLMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
-ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjAzMTEx
-ODUzMzVaFw0xNzAzMDYxODUzMzVaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
-Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAL9Jv54TkqycL3U2Fdd/y5NXdnPVXwAVV3m6I3eIffVCv8eS+mwlbl9dnbjo
-qqlGEgA3sEg5HtnKoW81l3PSyV/YaqzUzbcpDlgWlbNkFQ3nVxh61gSU34Fc4h/W
-plSvCkwGSbV5udLtEe6S9IflP2Fu/eXa9vmUtoPqDk66p9U/nWVf2H1GJy7XanWg
-wke+HpQvbzoSfPJS0e5Rm9KErrzaIkJpqt7soW+OjVJitUax7h45RYY1HHHlbMQ0
-ndWW8UDsCxFQO6d7nsijCzY69Y8HarH4mbVtqhg3KJevxD9UMRy6gdtPMDZLah1c
-LHRu14ucOK4aF8oICOgtcD06auUCAwEAAaOCASwwggEoMB0GA1UdDgQWBBQwEs0m
-c8HARTVp21wtiwgav5biqjCBwAYDVR0jBIG4MIG1gBQwEs0mc8HARTVp21wtiwga
-v5biqqGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
-EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDOpdo1BueqSzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAK+fyAFO8CbH35P5mOX+5wf7+AeC+5pwaFcoCV0zlfwniANp
-jISgcIX9rcetLxeYRAO5com3+qLdd9dGVNL0kwufH4QhlSPErG7OLHHAs4JWVhUo
-bH3lK9lgFVlnCDBtQhslzqScR64SCicWcQEjv3ZMZsJwYLvl8unSaKz4+LVPeJ2L
-opCpmZw/V/S2NhBbe3QjTiRPmDev2gbaO4GCfi/6sCDU7UO3o8KryrkeeMIiFIej
-gfwn9fovmpeqCEyupy2JNNUTJibEuFknwx7JAX+htPL27nEgwV1FYtwI3qLiZqkM
-729wo9cFSslJNZBu+GsBP5LszQSuvNTDWytV+qY=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.cnf b/src/test/setup/radius-config/freeradius/certs/client.cnf
deleted file mode 100644
index 994d3ab..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.cnf
+++ /dev/null
@@ -1,53 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= client
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-
-[client]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= user@ciena.com
-commonName		= user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/client.crt b/src/test/setup/radius-config/freeradius/certs/client.crt
deleted file mode 100644
index 1197fec..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.crt
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 2 (0x2)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:36 2016 GMT
-            Not After : Mar  6 18:53:36 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
-                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
-                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
-                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
-                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
-                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
-                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
-                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
-                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
-                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
-                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
-                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
-                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
-                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
-                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
-                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
-                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
-                    6b:ff
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
-         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
-         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
-         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
-         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
-         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
-         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
-         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
-         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
-         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
-         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
-         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
-         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
-         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
-         c2:15:2d:ca
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.csr b/src/test/setup/radius-config/freeradius/certs/client.csr
deleted file mode 100644
index 8f8a518..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.csr
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICwDCCAagCAQAwezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
-EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHTAbBgkqhkiG9w0BCQEW
-DnVzZXJAY2llbmEuY29tMRcwFQYDVQQDFA51c2VyQGNpZW5hLmNvbTCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgC
-NXXl2VFzKLNNvB9PS6D7ZBsQ5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlG
-MQJUbRD9V/oqszMX4k++iAOKtIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5
-+CY9WAicXyy+VEV3zTphZZDROjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tI
-gZb48RS1NPyN/XkCYzl3bv21qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9
-uBLBzcQt+4Rd5pLSfi21WM392Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaAA
-MA0GCSqGSIb3DQEBCwUAA4IBAQB030zqg/C6+0rwf+nsdQJvpUGFVCT3WJRf7Qx5
-NC3n6hfetLHs7XjPZ77CI2B1VEPE7r55Mv1m81b1+2WO/jFQXlM52CteOSLy/Zsj
-lUBW4naaCa+C3liOn1cSONNClvKMGl2DcTbOFO8j9A3dAOHUR05SeAtGutVip9CS
-NPl36MmwFUO0p25UkmG4IJIZPVaMEjqEPVjWxnRFrajFwsbyMkHEFIEvQ/TP1qpN
-LzLmp+Y4flS4O7zC3AAt4Zayr4AC5cf4JKDJxxfZ+qE0KS7jV4bJdo5hxpGz4ECC
-/LDZPZN9oGr67bNSjM4+Ogdx5v4Huojn/lQPK4gTME8SIUPX
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.key b/src/test/setup/radius-config/freeradius/certs/client.key
deleted file mode 100644
index a2e92c3..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.key
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIY2/Sy6WJDBYCAggA
-MBQGCCqGSIb3DQMHBAgzT2/EDIfXJASCBMjE2epab8bFXVgs1E2P02G/LzexUvvO
-gH9GltVbzVSmsuWNKaGBqWvRNiKrIiGIBQpNZsdV/0ae/5Etw3qkvcO/vTiPAOgC
-3+vkaNZMIpI4jePvvCzeUlnybg4+WBq6aXZLRQc8uCWkApH/HfcnwxCKEKebPqj5
-F1xmzT8WOSRJYytrwzU7GWQtsDwFCv0KnTJeYni9CVLIX8jFLtMB1mRhYZ93eSiM
-DjsSr2OH/AOiZQEzCv5YMbDk4WD9L1MD5S62bpWxdwG/aEr8E1dI1Z2TJHzx75dR
-lWdoV1BQHfKmsQRtwnZ/Hq6zmzY+SStJGFUcRdBdLdJrfRcIyTJXVkFYoVMM/PDl
-UT4K0pIcDILH7jPNp7kuDfSDigFNvqk4O6GybN+TT7cQKH5oGtEsvGSOfUYZUEvp
-KV4rpyR+n3NPC1tEoOvfuGlqHDGN62pdTVhFM/FqFbZSEoTzlSU+OecLiQrGtS6T
-hrmWt/go20MxTlWh87L0s9SRalP4A2YkufHBst8oSgwI4DzVhifqqWD87w7iL0Ur
-6drgbtlM2hY3onkwS2+oSzEIIlwLwaBaAt2hnVosgZIQajcmlayIhRQ1SNsYYj2T
-YTTTYxPWwUaIYzOl7Ri1OoD5dSFY84sUAD7odLMpzmEJQIi31KYIdOs1BN5oDpFV
-GbcKtF7sKw2QBb8nZgADobpCHIJIG/SLNqx4UgSZYgLVUgW0xaS8+8ylVLqRkIjM
-yoGkxqezc2pvCAbH8BMGYaZei5TL9GHanae+t6caBK9Zty6m9bdT9H9EkC6NEWhX
-IuKGZjyq/+O1mFK/66ts+tq9mynyZfVxxAKINijGLEWKPL0KAZkZIfFnCfXO7kK/
-JJNp5zE8GX9FFT5w8sq2UTsfS/F6K8kih+gZVJtj4irnWiABLq4VQjBRPeJJFt5Q
-Zki48dH5JP1/0222mka+ynRfv6pAtSN1Y5vx2mDPNoxiajhfMoLAxFkwwUYA3AfI
-DMTByk7n27HfWtmkUV+Zx263NVkCU0/BjOE3j7N1OojSuCizJRIT199hRhmnTFoy
-FPRrmYF4g/HU1ca6u8If5JzZAfJIqN8H9oHHTdWg5HuU31NpQPHgQqRGvaaBpuwc
-oglzg6mhl/4dUlZiw6l7bJGMojid24iTMgt6FkUqma1ECQ2wp3SF06u7iFecB78B
-aKJhOGOF1GHc0DMHNwLfSw1wIBah5K1SFm4JQyEYrG/KeRGXRKKGlKPGTKQPQRCU
-LCqbWnFMPBW5b/V/Xv02BBemgwp44RsFDQo6AVp6zbzWwh84oLrFSuGMK7aGynGA
-/MzGAmrC5jbIC62WAejlSj51o6sHoQNjn59PULZWqsbfD0DWH1DXeGqzLNd0phx7
-v1yDjLVq7J64YNYtxctZ+G54Pkg5wHTyx+dt3gKi/wVSc/cOHNDC2QxWhvSxL0cp
-/QpgggxaADcPZkvQe2/34wbqBTYbz9j+PODuad8hrqSLMLzX5iIFqE8qOYNPwH8z
-Lz66G4k3kp307/+0pocIRASn9dtX0PgpGyKo1hvg3zYNP+ObRPOT06Zx1HhEIx2S
-7oQXaQNDJpZd5tO+s7RY80ficybUe7wC4BnqNaoxVluBaIEA2NdiPHOiL5Sh0sme
-0oI=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.p12 b/src/test/setup/radius-config/freeradius/certs/client.p12
deleted file mode 100644
index d1289a9..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/client.pem b/src/test/setup/radius-config/freeradius/certs/client.pem
deleted file mode 100644
index 6dc7d9b..0000000
--- a/src/test/setup/radius-config/freeradius/certs/client.pem
+++ /dev/null
@@ -1,60 +0,0 @@
-Bag Attributes
-    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
-MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
-Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
-d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
-Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
-ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
-1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
-vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
-gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
-cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
-RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
-8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
-nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
-aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
-XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
-vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
-aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
-QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
-U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
-AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
-Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
-T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
-0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
-ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
-sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
-VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
-B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
-uGo=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/dh b/src/test/setup/radius-config/freeradius/certs/dh
deleted file mode 100644
index e7b4f90..0000000
--- a/src/test/setup/radius-config/freeradius/certs/dh
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN DH PARAMETERS-----
-MIGHAoGBAKHERxCGYaLWD6ay09DuGxxs5whd4zFUS1pjA7jEvGwnbISSzGvzRbYi
-ymNeNgzrZhHiWo5GC008yLvUy0qxVMny0x+7xybup+mOv6ITEz+HuhlsBN+Aqc5P
-Oyq7h1qnuy8UiiEP87YcwhCFooQ3I8dCcMT7AVApYex4K81Sck/LAgEC
------END DH PARAMETERS-----
diff --git a/src/test/setup/radius-config/freeradius/certs/fef12f18.0 b/src/test/setup/radius-config/freeradius/certs/fef12f18.0
deleted file mode 120000
index e375f5a..0000000
--- a/src/test/setup/radius-config/freeradius/certs/fef12f18.0
+++ /dev/null
@@ -1 +0,0 @@
-ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt b/src/test/setup/radius-config/freeradius/certs/index.txt
deleted file mode 100644
index 27c2c7a..0000000
--- a/src/test/setup/radius-config/freeradius/certs/index.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	170306185336Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.attr b/src/test/setup/radius-config/freeradius/certs/index.txt.attr
deleted file mode 100644
index 8f7e63a..0000000
--- a/src/test/setup/radius-config/freeradius/certs/index.txt.attr
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old b/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old
deleted file mode 100644
index 8f7e63a..0000000
--- a/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.old b/src/test/setup/radius-config/freeradius/certs/index.txt.old
deleted file mode 100644
index f0ce0ce..0000000
--- a/src/test/setup/radius-config/freeradius/certs/index.txt.old
+++ /dev/null
@@ -1 +0,0 @@
-V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/serial b/src/test/setup/radius-config/freeradius/certs/serial
deleted file mode 100644
index 75016ea..0000000
--- a/src/test/setup/radius-config/freeradius/certs/serial
+++ /dev/null
@@ -1 +0,0 @@
-03
diff --git a/src/test/setup/radius-config/freeradius/certs/serial.old b/src/test/setup/radius-config/freeradius/certs/serial.old
deleted file mode 100644
index 9e22bcb..0000000
--- a/src/test/setup/radius-config/freeradius/certs/serial.old
+++ /dev/null
@@ -1 +0,0 @@
-02
diff --git a/src/test/setup/radius-config/freeradius/certs/server.cnf b/src/test/setup/radius-config/freeradius/certs/server.cnf
deleted file mode 100644
index 444372d..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.cnf
+++ /dev/null
@@ -1,54 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/server.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/server.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= server
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-
-[server]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Server Certificate"
-
diff --git a/src/test/setup/radius-config/freeradius/certs/server.crt b/src/test/setup/radius-config/freeradius/certs/server.crt
deleted file mode 100644
index 246df1b..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.crt
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 1 (0x1)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:35 2016 GMT
-            Not After : Mar  6 18:53:35 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
-                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
-                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
-                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
-                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
-                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
-                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
-                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
-                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
-                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
-                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
-                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
-                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
-                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
-                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
-                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
-                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
-                    de:a9
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
-         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
-         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
-         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
-         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
-         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
-         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
-         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
-         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
-         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
-         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
-         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
-         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
-         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
-         8b:78:9b:6f
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
-MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
-zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
-NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
-cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
-bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
-NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
-UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
-kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
-tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
-eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
-zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
-ceVGJAlllSIbNYt4m28=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.csr b/src/test/setup/radius-config/freeradius/certs/server.csr
deleted file mode 100644
index d055b9e..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.csr
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICzjCCAbYCAQAwgYgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UE
-BxMJU29tZXdoZXJlMRMwEQYDVQQKEwpDaWVuYSBJbmMuMR4wHAYJKoZIhvcNAQkB
-Fg9hZG1pbkBjaWVuYS5jb20xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRp
-ZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp5s9so9t3VXH
-NFqLx3io/xT6IQ5gGwyHNvEHOszxip0jTDGNgZILH7L5b1V5w/0Yj5mni4xBGKYC
-CMu1W4u3I6NtIKns7r/68ZnXBzWhO+let4SK211GFT4fki0S20zDqhPH3S2gCtI8
-WRn6fNmltBa9gro1R8Tc+69h8XDYsyzvkSDF1a+3rF0VTupkqwuz7iV+qqigpTYu
-We24xwJPq5vnUEwwFE1IGqKIBW5+gu/4xXC12D6u9uAuaLpS0+U6LQ/dQ4Y5ta9b
-w4Z7mHh/1Zvun+RQXgOeKWf1eDWx0+JmLWg2wjDJBsIcc5vDCSi6CLj1SeBb0UPZ
-OAZHMqLeqQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAI4JSSggafFTzjYU4n9s
-lKYCCy8+MQ2X9eEKcsBwDiPvXmJdeWJTFYfBHE5p6spyA3IULxb9N90Kajdf287L
-e1Gurh4XuLd2gv/UAR4fpYJ6r0jJviWAe77R1cuJ+QvQWOaSWzJmxtZkO7OdBl0A
-XmksiRLnKu0mGEcGSQQ3vfdwDTGqpfLuSn9U6B8FoK7AjkeB1oKf6UgHnbN01UKp
-ubExjX4lNHLLiNrXjBkyDpW5zBbbhEaPdnDLHvNnd6fTkd7F3Jt5timmrm9hKMMB
-hE7qLyiBoSdqFejZEPjcvJGV42sNetREqIrWnvsXrox+7P+5z8+uowebLXGohfJC
-hmc=
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.key b/src/test/setup/radius-config/freeradius/certs/server.key
deleted file mode 100644
index 63be1bd..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.key
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI9q05vTmS4WYCAggA
-MBQGCCqGSIb3DQMHBAi4iVH6BL89ZQSCBMiK+P9gWMj1xFZqEQZ/VUTB0bt6YM8q
-nc1otC9KdWVCl5zqdV3vC0BdLMGv3Xem+u/ycWyrSsqDD3RzFqIjIxJTsAQRMKwr
-Fu4sNBMyAh0TCzVtf73QjiRg8Jtkf4UbTfJzNo1J3rjglnkSJ+9rCAYT4Ei84opN
-T/pdlhw9uRRsz7v+HRzajcpyw6FxtjLOUI2PaG8Lyrlrgt6uP1PvurK2+jexZ8o6
-OWIo5kbrn/rpzTiHWNgRoWnT71J5/lXE8hkjtv/5WAuncPAaUVdo0nKg58RD66St
-MOfQKlISeOdNw0yUWNPKkr98Tnp+fSUFHV4NCpMoV7mgab16grd8XR4qnOYuq8Ay
-9m0kzvffeASJj9hmpRDrZGrPXijNCRtEE8WQv3tLAYRaH180m6qCr7cOCS89LZZ4
-sVEIiAsOgCuAX3E3PGrdFbsGR6MnRpoHNxtUkD5g/b//8HTJ7b0EMKp00VTuHQRH
-JxxTZnbPSmsHJ+RmKL1K3eHqCDXuTPVFdDh82mabd/EiSdfj13+8etMQrF62XhDw
-r/2ElsO1yIPkXg9+FuC67EIBkYEbpuCXkvqYeuYEskEtoSDCj5yoX/aNJUkVImA3
-zveRCH8GMD0kaIf9IQdQ1jJxUGc3ZWFo6MIFAUD5eGXfwWX1x11sFJP2uBdf+31A
-0GhFICUaziHcDrHtqp5/nzo8f0hh+y3zXLx/Mf+WMC0Nirh7nyMoEmeNufYZtdvI
-5u90rYiPr7yS8vQD1R0LQZnODmtx0akn9HAtFvGzFbfa6x+2RoPpDiKS43ZCQPeW
-8JhWakNKijzfl2vufVUtSDZ5cPg5oyTH2NMw+DAgxqowtPmYV9J+ecZ9akwKk1Uz
-cLpNPrDmdUCyfztU5tlfTIdduafj9eIIgvVZs9wajlEWvooMW7cwbKYA0I5wYdq2
-lqFvnJtngUuvykYPFLg/ME+bXbdmQ6M91HpxOqUKp1feX4TW6yDlStpA40vPO3iB
-HmfL1DW3O4JTmvBwdoLPYoL5vP3/st51vXMXUcnyjHAzCa4HXj80PWyBsCM6S/iT
-SJtieMXSLw7R30D5boXncQS/fBCsdJpEpz2GyjJUn2RLbYJ3OsQbXB0eCaL7y9LL
-hGVK5Ez/HWjZ7Q6WRotVjeO5yRIgzWe4VRV58CVOH2CIkf1ODolzhREyzSBCGD6Q
-5rOZSAd21aStrNWQ02nYPXZbcnTo1LQImonSQ4SJZg0lsRSHfahmXkKafyYg5U8E
-jiff1uzSWWtmSZkY46S4dzQOZsY97k8cChliSnY1Jk8mh/5D9ehLxalUNMv0DIN/
-yTgYmC1TasTdchkSZdEyli8xvGWcmMKC+A5ycfRyE2mPxuEL6nQq4MAH7Yie9g7T
-Fzamniy0SXT08yXu2oFhi7VLyxSbGvIBQqE06rh2NVgt+N1eRSa/SJlkB6iqEmEA
-X+4b3D3s+ST6bZ19b6PP1t4tbfpGZ3LGezndpY4GqgfsUi5hdQcdfRjknCyFRZKm
-Qqi43ojk1xsdUHt/q0Y4RFHMtR5oQTapRXybQBRbzS7KCiRsH356ACowvV0UCNg2
-WzfFm3uozQO6NJCfWePdkfVrxU0p4q9s9QxxDX5SApQpqcwt0rJiDOzXvxKH8jx6
-qHo=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.p12 b/src/test/setup/radius-config/freeradius/certs/server.p12
deleted file mode 100644
index 352d346..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/server.pem b/src/test/setup/radius-config/freeradius/certs/server.pem
deleted file mode 100644
index b8b70f5..0000000
--- a/src/test/setup/radius-config/freeradius/certs/server.pem
+++ /dev/null
@@ -1,60 +0,0 @@
-Bag Attributes
-    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
-MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
-zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
-NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
-cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
-bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
-NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
-UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
-kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
-tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
-eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
-zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
-ceVGJAlllSIbNYt4m28=
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIQUZafKqU+44CAggA
-MBQGCCqGSIb3DQMHBAhxfosFDCxaJwSCBMhbvtkYjR1vowEixVQS2J5vL4EhHv7x
-ImI1vnLIbjY6HmUGuOolLidZQ7ieG1hrElat3gPCgw7zfmZyS3DOnLTxBAZRlOxK
-3zkBnegVGNbOnnsLJJX52JSDJvGnlUPWg3r8UhFp3aPH1eqUyt0bTySgpsSdt3yD
-/oWymM1bQW65KfKW3cskR+oKyqjh4rQevyTf5dR2r4noVfR96RqdJWJ95ag40GXN
-gpLDBTZD+iuMN1PiH9CraJWbRIWQOM0ieC79wGZ57V5tzgHENNey2itgwJ93iCj0
-Ay4f0HUEOuqJ7kK1fYEo+MUBt5TzpLBygGIVgbusz57C6DgCHwhBFtLS952GkmEP
-CAKM9I7wWR3w0Mj5maz4kq2hSzou1j81+ivxSkXMEGsCfwbrjY1QIitZdeEu31ti
-uf9+Jx2tK2yIu7+MLnMnpB7vdXrrPT6wipGMBe8a1/sczE2/foW0e2VarQIuS8bt
-fVpnfXT91Mf0DVn6Bc+ZI3EMG555Ah7GqbVztAlRm6IpbpFyFixx8m6oBwYc/ik6
-fReFzBOq+hV9VPAwYkzGlR+6hhfxhCUyE89LmB2z+fJvEMRj+X8LG21bHTkJoymp
-E/a4NIvOZv1vE3PpK7quZDm9HT/hdTsXbqlfbIBPcpJyUSbTcdBX2jcXfTz0od8Z
-e1iNlQ93d8FHuZFbtYiiZRSWGHPXI3sc96qY12cbUftZy20eN2esn37l13mDi0uS
-Qn0lAQFQwnEF4RROSSoLJefXc9kNXxq2cgZ/rWuUerwQQfMWU5tPwDS5UEoJjQg3
-eK2GH8YMoUuS178X9IU8cXD6vFkSOQ4uZ7L8sY7YHxqo8FeKW+YA7j5U8aNkVC3X
-crlV7VAbfd5k8NDaNe39dM8YNfJre6yBF8Wbvh6HV2a2JgzeQHQPXqLIKC27MCCY
-67P/IHmTis5Yz/tDKwO19N463VrDC6wno6fQdeNe5j3j29/y3YAkJweUtvXCYYJ6
-MOBh5hM+jMJWNSnfERUhjzp+FDoVzZgcxZ8OKbkOr6QZo3WBC7ogoJAVIaNS9Kl+
-RXUhdEd2uoYzwcNEmE9EqRTs8+Yy4VlgPS2iHWy+lboa+1Fi4yAZzeTmAd/BLQNB
-kLUI4OzniBtHn0C4oHz+Lfkm24t5iR5pxIWhNnOOxS0gkObtyWPlcC3LXYZ85ude
-mR8265l5FP9jabzfnCfoZWtdnIBUNcwAcB5oCdChipfJobXrmjyp5W8Sw8enr0BU
-ZJ2MwTGufoeQ3t2IsybY82TuXB6aLegevH3xC4kJV3We83LcUxNhkqmycU935ew0
-cJVQO8C3J5U4Pha8tn1+mKvDaKcv4HmG0YZyN48tdOtR1y4+Xzhq9hSwKl+rzG1Y
-TP0mW1fNfHRDrbykxkIpAXay9kDtfafalMI3ShndZfYiYJBe8IB+m9NML/lEPQyC
-fHH3xPNixHu74a71b6xgMZFhrrXBikfMUB1qroWa+9ocy/5LvdfCRIQN+ti7Tb4F
-FH5qzP/qAfjEdejuIdHHKNs/wkhTixqi8QCkDWEXkDj8AsiVmiBva6luSuQ31OiT
-ERZmRhkZfpkKmo4Jgc12dNsOqXYPF2KJ16bSElfuY5PGYR8JEw9Tz1k1UaMmrOGR
-guU=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem b/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem
deleted file mode 100644
index 6dc7d9b..0000000
--- a/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem
+++ /dev/null
@@ -1,60 +0,0 @@
-Bag Attributes
-    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
-MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
-Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
-d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
-Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
-ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
-1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
-vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
-gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
-cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
-RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
-8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
-nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
-aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
-XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
-vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
-aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
-QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
-U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
-AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
-Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
-T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
-0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
-ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
-sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
-VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
-B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
-uGo=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/xpextensions b/src/test/setup/radius-config/freeradius/certs/xpextensions
deleted file mode 100644
index 8e4a9a2..0000000
--- a/src/test/setup/radius-config/freeradius/certs/xpextensions
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#  File containing the OIDs required for Windows.
-#
-#  http://support.microsoft.com/kb/814394/en-us
-#
-[ xpclient_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.2
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-[ xpserver_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.1
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-#
-#  Add this to the PKCS#7 keybag attributes holding the client's private key
-#  for machine authentication.
-#
-#  the presence of this OID tells Windows XP that the cert is intended
-#  for use by the computer itself, and not by an end-user.
-#
-#  The other solution is to use Microsoft's web certificate server
-#  to generate these certs.
-#
-# 1.3.6.1.4.1.311.17.2
diff --git a/src/test/setup/radius-config/freeradius/certs_2/01.pem b/src/test/setup/radius-config/freeradius/certs_2/01.pem
deleted file mode 100644
index 246df1b..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/01.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 1 (0x1)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:35 2016 GMT
-            Not After : Mar  6 18:53:35 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
-                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
-                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
-                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
-                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
-                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
-                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
-                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
-                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
-                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
-                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
-                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
-                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
-                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
-                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
-                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
-                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
-                    de:a9
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
-         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
-         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
-         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
-         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
-         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
-         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
-         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
-         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
-         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
-         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
-         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
-         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
-         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
-         8b:78:9b:6f
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
-MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
-zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
-NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
-cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
-bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
-NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
-UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
-kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
-tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
-eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
-zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
-ceVGJAlllSIbNYt4m28=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/02.pem b/src/test/setup/radius-config/freeradius/certs_2/02.pem
deleted file mode 100644
index 1197fec..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/02.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 2 (0x2)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 18:53:36 2016 GMT
-            Not After : Mar  6 18:53:36 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
-                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
-                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
-                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
-                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
-                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
-                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
-                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
-                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
-                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
-                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
-                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
-                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
-                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
-                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
-                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
-                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
-                    6b:ff
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
-         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
-         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
-         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
-         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
-         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
-         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
-         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
-         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
-         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
-         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
-         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
-         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
-         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
-         c2:15:2d:ca
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/03.pem b/src/test/setup/radius-config/freeradius/certs_2/03.pem
deleted file mode 100644
index 22fc813..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/03.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 3 (0x3)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:46:18 2017 GMT
-            Not After : Mar  6 00:46:18 2018 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:d5:78:e4:d8:c6:a7:cf:08:22:c7:d7:bf:d8:47:
-                    78:bc:78:76:c3:a8:a1:70:42:be:0d:ed:d1:ef:05:
-                    58:a9:b5:31:13:0f:f2:15:53:da:95:cf:ee:14:1c:
-                    c8:23:79:f2:0b:e2:ba:1e:36:52:f0:f5:32:0e:ab:
-                    18:45:0f:31:f5:b7:84:6d:b4:41:4c:33:9d:e5:07:
-                    b4:72:98:da:de:6b:37:4e:8f:16:c9:e4:42:42:2c:
-                    8d:87:ee:62:fa:c1:b5:a9:3c:d0:5e:0a:35:df:cb:
-                    7a:1c:03:7f:37:ec:31:63:78:be:04:f8:39:f9:55:
-                    93:e4:b0:65:53:34:43:69:64:89:66:2c:8a:a4:38:
-                    25:74:ae:e6:c4:63:96:43:5d:9a:9e:9b:27:79:d8:
-                    fc:3c:36:2b:8e:ab:37:d4:06:65:bd:24:b1:66:c5:
-                    32:1c:b2:81:e8:2d:32:69:73:ac:89:d6:b7:f2:98:
-                    7a:83:88:0a:1a:3f:31:eb:ee:83:fe:d0:f0:88:f5:
-                    97:54:26:d8:a2:6e:6a:97:88:aa:a1:b0:52:ed:0f:
-                    d5:cb:f5:0b:36:bb:69:79:1b:5c:1c:72:ed:26:61:
-                    00:8f:0c:7b:9a:75:a7:3a:5e:15:65:30:bb:94:06:
-                    25:a8:8e:f4:50:80:b6:ba:64:7c:00:bd:4d:f9:0a:
-                    de:5d
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         93:2b:1e:f2:bd:3e:bb:09:3c:96:ad:3a:23:1b:4e:54:0e:0f:
-         99:d4:a1:22:7a:96:a4:0b:d8:0e:98:ca:1f:b1:57:ee:53:47:
-         06:1d:40:5f:07:c2:41:14:7e:cb:38:0b:e2:27:b8:72:ca:71:
-         90:70:05:c6:2e:6b:a3:96:40:43:50:61:a7:07:e2:e1:45:b9:
-         a5:ed:01:1e:c8:33:b5:2b:88:99:4a:c7:1e:f5:c4:55:9e:49:
-         16:25:cd:19:65:d1:c1:c4:a1:8f:69:04:20:e9:f4:b0:08:16:
-         59:8c:89:47:44:fc:64:8e:0b:b8:49:d4:2c:a6:f7:8e:2e:32:
-         ca:57:91:0d:f2:c6:74:c8:cb:45:45:6f:36:32:a6:d2:00:49:
-         ee:5f:7e:62:97:a5:d0:8c:08:88:eb:83:5a:ec:cb:5d:f4:31:
-         05:8d:41:63:ee:7a:24:5b:8b:96:c7:6d:f0:d4:09:e0:32:e9:
-         fc:39:29:dd:6a:fb:31:58:f9:90:ef:af:12:c9:12:e9:5b:fc:
-         2c:47:6b:ee:91:aa:a2:d8:cb:fc:ad:06:95:8c:58:a1:3d:64:
-         2e:ca:c6:e1:11:b1:ed:5f:43:4a:21:e1:86:1d:6f:f3:1f:12:
-         1a:81:46:eb:ef:b3:97:77:f6:93:be:2b:c2:d4:62:09:75:fe:
-         46:49:29:e4
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NjE4WhcN
-MTgwMzA2MDA0NjE4WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xIzAhBgNVBAMMGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDVeOTYxqfPCCLH17/YR3i8eHbDqKFwQr4N7dHv
-BViptTETD/IVU9qVz+4UHMgjefIL4roeNlLw9TIOqxhFDzH1t4RttEFMM53lB7Ry
-mNreazdOjxbJ5EJCLI2H7mL6wbWpPNBeCjXfy3ocA3837DFjeL4E+Dn5VZPksGVT
-NENpZIlmLIqkOCV0rubEY5ZDXZqemyd52Pw8NiuOqzfUBmW9JLFmxTIcsoHoLTJp
-c6yJ1rfymHqDiAoaPzHr7oP+0PCI9ZdUJtiibmqXiKqhsFLtD9XL9Qs2u2l5G1wc
-cu0mYQCPDHuadac6XhVlMLuUBiWojvRQgLa6ZHwAvU35Ct5dAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAJMr
-HvK9PrsJPJatOiMbTlQOD5nUoSJ6lqQL2A6Yyh+xV+5TRwYdQF8HwkEUfss4C+In
-uHLKcZBwBcYua6OWQENQYacH4uFFuaXtAR7IM7UriJlKxx71xFWeSRYlzRll0cHE
-oY9pBCDp9LAIFlmMiUdE/GSOC7hJ1Cym944uMspXkQ3yxnTIy0VFbzYyptIASe5f
-fmKXpdCMCIjrg1rsy130MQWNQWPueiRbi5bHbfDUCeAy6fw5Kd1q+zFY+ZDvrxLJ
-Eulb/CxHa+6RqqLYy/ytBpWMWKE9ZC7KxuERse1fQ0oh4YYdb/MfEhqBRuvvs5d3
-9pO+K8LUYgl1/kZJKeQ=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/04.pem b/src/test/setup/radius-config/freeradius/certs_2/04.pem
deleted file mode 100644
index 893ee5c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/04.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 4 (0x4)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:46:19 2017 GMT
-            Not After : Mar  6 00:46:19 2018 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:b3:d4:8e:0d:81:9a:1d:06:ba:10:11:d2:db:ec:
-                    eb:6d:7a:da:e7:c1:d6:a6:bf:91:bf:a9:f1:99:7b:
-                    94:63:9e:17:c3:8e:62:19:25:87:ea:4d:cc:40:f6:
-                    e1:a1:2f:af:01:17:ba:bb:a6:4c:02:fd:98:71:01:
-                    d9:14:13:47:10:e5:e6:75:71:8e:cd:0b:1e:b0:55:
-                    ad:17:c8:f3:d2:67:71:b1:9f:a2:04:04:76:ce:84:
-                    a4:82:ee:75:8a:22:02:8b:1c:76:e6:ab:e7:c3:6a:
-                    51:30:78:5d:6b:d6:3f:94:24:e8:6f:74:2e:6f:0b:
-                    18:1c:86:7b:ea:36:b9:27:40:83:11:fe:f0:72:f7:
-                    01:0e:d2:ef:b1:32:c6:18:fb:bb:e8:89:18:68:e5:
-                    ab:f2:4f:9d:25:df:56:31:dd:1b:54:3f:9c:b4:0b:
-                    62:f7:d0:d2:33:ea:53:99:48:4a:46:77:bb:47:ed:
-                    bc:b1:f4:4d:d6:41:4b:d0:72:57:82:74:98:98:78:
-                    19:f8:8c:55:03:ec:d3:51:3c:05:ec:53:ff:cf:f7:
-                    9e:9e:69:bd:b0:1e:3d:7b:0c:a2:55:4e:be:f7:da:
-                    27:69:c9:4e:10:6a:16:36:03:41:b3:e1:17:79:37:
-                    f3:49:a9:ce:e2:91:03:47:92:58:6d:43:41:90:60:
-                    d9:a1
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         20:56:2b:1e:6b:aa:9f:d8:24:cf:b1:e3:ea:b8:4c:ce:d2:87:
-         88:2c:27:dc:54:85:18:24:79:d3:db:79:dc:6b:7c:e4:52:bd:
-         94:79:72:d2:1e:51:83:2c:00:f0:28:7d:1b:e9:22:ab:66:28:
-         fd:7f:ad:62:47:1b:25:9f:e4:0b:47:b1:34:c4:2a:a4:79:66:
-         b8:68:a5:de:ca:62:d4:c1:a8:c8:da:44:96:49:d4:63:2d:81:
-         0a:83:1e:b8:87:db:47:da:e1:c0:48:57:e3:39:59:99:bc:96:
-         2f:a3:a1:99:94:c8:a4:be:bc:47:ff:48:61:95:68:88:9e:22:
-         43:8c:c6:24:18:7b:78:eb:25:a3:79:8d:4c:ca:3e:4b:d9:ee:
-         38:94:52:4d:12:83:e1:89:dc:ab:38:6d:92:e9:c2:1a:33:36:
-         29:c8:78:14:6b:d3:95:29:89:5a:e3:a9:9e:11:19:25:db:82:
-         82:40:81:55:55:a9:1f:a2:43:7a:5a:15:7f:78:2a:7c:c0:08:
-         a0:99:2c:7f:78:3b:e6:26:d4:cf:cb:ab:35:07:40:20:21:a4:
-         0c:7d:b8:49:7a:c8:33:dd:36:da:41:7d:79:53:98:33:83:c2:
-         7b:69:51:a1:e8:cc:3f:6e:3a:32:9a:7c:c8:42:4f:74:af:5c:
-         8e:e6:2e:54
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NjE5WhcN
-MTgwMzA2MDA0NjE5WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBALPUjg2Bmh0GuhAR0tvs62162ufB1qa/kb+p8Zl7lGOeF8OOYhklh+pN
-zED24aEvrwEXurumTAL9mHEB2RQTRxDl5nVxjs0LHrBVrRfI89JncbGfogQEds6E
-pILudYoiAoscduar58NqUTB4XWvWP5Qk6G90Lm8LGByGe+o2uSdAgxH+8HL3AQ7S
-77Eyxhj7u+iJGGjlq/JPnSXfVjHdG1Q/nLQLYvfQ0jPqU5lISkZ3u0ftvLH0TdZB
-S9ByV4J0mJh4GfiMVQPs01E8BexT/8/3np5pvbAePXsMolVOvvfaJ2nJThBqFjYD
-QbPhF3k380mpzuKRA0eSWG1DQZBg2aECAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAIFYrHmuqn9gkz7Hj6rhM
-ztKHiCwn3FSFGCR509t53Gt85FK9lHly0h5RgywA8Ch9G+kiq2Yo/X+tYkcbJZ/k
-C0exNMQqpHlmuGil3spi1MGoyNpElknUYy2BCoMeuIfbR9rhwEhX4zlZmbyWL6Oh
-mZTIpL68R/9IYZVoiJ4iQ4zGJBh7eOslo3mNTMo+S9nuOJRSTRKD4YncqzhtkunC
-GjM2Kch4FGvTlSmJWuOpnhEZJduCgkCBVVWpH6JDeloVf3gqfMAIoJksf3g75ibU
-z8urNQdAICGkDH24SXrIM9022kF9eVOYM4PCe2lRoejMP246Mpp8yEJPdK9cjuYu
-VA==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/05.pem b/src/test/setup/radius-config/freeradius/certs_2/05.pem
deleted file mode 100644
index f206dde..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/05.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 5 (0x5)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:47:43 2017 GMT
-            Not After : Oct 31 00:47:43 2022 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:cb:b3:5b:bc:a3:20:e3:79:6d:a0:45:d9:cc:6d:
-                    54:4a:53:d0:72:66:92:b7:00:35:43:9c:a0:99:5c:
-                    94:f0:65:d3:c1:a1:e8:4b:5f:19:57:dc:9a:e3:52:
-                    ba:61:98:e6:a2:73:8b:e1:72:0b:53:e5:50:35:ab:
-                    58:ec:04:ff:b0:78:ab:f6:61:d0:8a:c8:43:af:c2:
-                    fe:43:26:20:4c:78:cd:01:b7:d0:70:d4:2d:f3:c8:
-                    1b:c7:84:aa:be:57:6b:49:b0:f8:66:db:ec:2c:68:
-                    05:9c:8f:2d:e2:a3:a3:be:f6:8b:a9:d3:f4:01:96:
-                    d2:76:1e:1b:a7:b5:87:a5:ed:b6:a6:2e:50:76:05:
-                    11:84:92:b0:d8:70:46:1f:3c:ee:07:fc:ca:45:dd:
-                    ca:df:f7:0b:27:05:6d:4d:ce:02:39:49:92:c3:87:
-                    f8:44:76:e2:b4:9c:2e:00:18:8d:0b:8f:9a:8c:7f:
-                    cd:99:81:d2:1c:d2:f7:48:5b:12:4d:40:ef:4c:cf:
-                    3b:ea:f4:6e:60:c3:a4:a7:df:fb:0f:1b:39:75:7f:
-                    60:0a:d6:d7:9a:9a:f0:80:aa:b6:d5:da:cc:08:64:
-                    80:a2:dd:0a:01:1e:6f:9b:19:49:c5:97:b6:2e:6d:
-                    c5:8c:98:4c:13:a4:15:6a:d5:fc:66:cf:17:83:0e:
-                    bf:fb
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         a0:f6:b8:4d:6f:f5:5e:ae:04:98:c8:44:cd:24:4d:d9:f1:a2:
-         f5:c1:ac:97:d5:d9:30:e3:f8:e0:40:97:ac:bf:71:61:fa:c1:
-         7c:c6:7b:09:36:76:12:a1:9d:3a:91:c0:11:6b:35:6b:5b:32:
-         ec:72:af:84:20:c0:a8:81:50:20:aa:98:92:52:20:c6:a4:97:
-         00:7a:4c:70:ba:b5:c4:25:46:7e:c5:46:48:92:cc:e0:6b:d7:
-         a7:2c:25:77:f9:d3:11:e0:a0:b5:02:bb:c0:43:98:dd:4e:5c:
-         57:d7:f4:25:5b:a5:15:f2:db:23:62:e0:f7:20:66:e4:6d:f8:
-         10:73:43:22:df:80:25:7e:6e:ce:13:26:56:96:1d:39:f0:00:
-         0e:a7:8d:82:b9:7a:8a:7a:17:7e:7b:eb:19:df:26:36:a0:e4:
-         71:fd:09:ac:43:42:92:df:a2:f8:2b:4f:51:28:9d:8e:0c:ac:
-         3c:d5:ec:b8:97:89:57:f7:c8:bb:d3:3e:67:95:66:85:27:69:
-         7a:1e:fe:f5:a3:f9:df:c5:52:44:21:89:ed:c4:bb:20:d9:37:
-         80:fa:c3:f2:44:22:5f:fa:ea:5d:0c:59:10:07:d8:fa:91:e1:
-         9c:98:4e:c3:d4:ae:0e:5a:f4:6b:c4:c6:4a:dd:b7:d5:a3:a7:
-         c0:72:fd:04
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQzWhcN
-MjIxMDMxMDA0NzQzWjB0MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xIzAhBgNVBAMMGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDLs1u8oyDjeW2gRdnMbVRKU9ByZpK3ADVDnKCZ
-XJTwZdPBoehLXxlX3JrjUrphmOaic4vhcgtT5VA1q1jsBP+weKv2YdCKyEOvwv5D
-JiBMeM0Bt9Bw1C3zyBvHhKq+V2tJsPhm2+wsaAWcjy3io6O+9oup0/QBltJ2Hhun
-tYel7bamLlB2BRGEkrDYcEYfPO4H/MpF3crf9wsnBW1NzgI5SZLDh/hEduK0nC4A
-GI0Lj5qMf82ZgdIc0vdIWxJNQO9Mzzvq9G5gw6Sn3/sPGzl1f2AK1teamvCAqrbV
-2swIZICi3QoBHm+bGUnFl7YubcWMmEwTpBVq1fxmzxeDDr/7AgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAKD2
-uE1v9V6uBJjIRM0kTdnxovXBrJfV2TDj+OBAl6y/cWH6wXzGewk2dhKhnTqRwBFr
-NWtbMuxyr4QgwKiBUCCqmJJSIMaklwB6THC6tcQlRn7FRkiSzOBr16csJXf50xHg
-oLUCu8BDmN1OXFfX9CVbpRXy2yNi4PcgZuRt+BBzQyLfgCV+bs4TJlaWHTnwAA6n
-jYK5eop6F3576xnfJjag5HH9CaxDQpLfovgrT1EonY4MrDzV7LiXiVf3yLvTPmeV
-ZoUnaXoe/vWj+d/FUkQhie3EuyDZN4D6w/JEIl/66l0MWRAH2PqR4ZyYTsPUrg5a
-9GvExkrdt9Wjp8By/QQ=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/06.pem b/src/test/setup/radius-config/freeradius/certs_2/06.pem
deleted file mode 100644
index a35ea0c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/06.pem
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 6 (0x6)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:47:44 2017 GMT
-            Not After : Oct 31 00:47:44 2022 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:e4:f1:8b:9f:93:52:e6:a8:b1:48:7b:7f:fe:14:
-                    f6:e9:cb:57:7a:87:08:2e:d2:7f:77:b4:a3:c1:7e:
-                    14:fe:01:55:08:88:b2:55:13:12:ea:70:81:dd:d5:
-                    92:25:40:87:15:fb:e0:e7:37:c3:b5:c5:f7:c9:ac:
-                    2b:1e:dd:4d:eb:69:41:a6:4f:66:01:20:1a:20:e6:
-                    cc:6c:e5:00:69:30:a8:3e:05:3b:92:68:b4:93:20:
-                    23:4a:e9:71:8c:3e:33:fc:fe:58:35:60:27:1d:c8:
-                    f9:51:b2:0b:a9:45:2a:12:09:59:39:53:c9:28:5e:
-                    c8:c6:29:72:04:af:3a:e3:78:cf:c9:1e:08:d8:36:
-                    b9:a4:59:20:eb:c8:7f:a6:94:09:31:7b:7e:9d:87:
-                    0b:fe:3a:f3:fd:d3:e9:1f:d5:b9:82:85:35:da:6c:
-                    c9:4d:68:81:b4:2a:09:42:f8:58:73:36:b9:6c:fa:
-                    1b:ba:f3:5c:8d:3f:b2:49:0a:7a:6a:06:a2:e1:70:
-                    fb:42:37:c9:d3:e9:1a:98:96:2b:83:69:3c:a1:da:
-                    c7:87:51:19:a3:b5:36:64:c3:0a:da:c9:38:02:8f:
-                    43:1f:02:61:81:f4:1c:c3:69:05:b6:2f:89:d4:e7:
-                    1c:1c:58:50:1a:d6:36:9c:ce:27:fc:60:a2:96:b0:
-                    03:c9
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         a2:c6:b9:c1:5f:d1:03:65:ce:fd:25:46:5b:1e:4a:26:44:0d:
-         06:7e:b3:2a:e2:d1:38:37:64:9b:55:30:a6:47:52:66:96:28:
-         5f:0d:9d:b3:a6:0f:3d:bc:55:f6:01:5e:8a:d0:9c:3d:94:14:
-         86:8d:fc:16:0d:4b:62:b3:56:f5:4b:03:3c:2d:87:01:84:b1:
-         ae:f0:a2:9d:32:c2:d8:db:aa:2e:c3:e3:ec:d4:ae:fb:8d:65:
-         df:b8:46:54:6e:16:4e:33:d7:3e:a7:ad:df:77:b4:9a:ac:f5:
-         79:85:67:dd:2b:06:bd:99:7d:f2:c8:06:e8:00:c3:d5:1a:94:
-         6f:48:29:12:87:26:a0:df:b2:c7:95:b4:84:80:30:65:03:c1:
-         b7:89:df:01:67:46:1f:d0:1c:25:32:ff:1a:3a:fb:e6:6e:29:
-         dd:c4:18:9b:60:27:44:ea:7e:06:29:b8:00:42:ef:d3:65:88:
-         97:be:ef:61:68:af:97:3a:24:d4:97:d9:df:c1:8b:f7:8f:82:
-         30:b3:92:28:db:fd:38:b8:dc:33:fb:a1:e5:0e:8f:07:ee:31:
-         bf:c9:00:8e:63:bf:16:af:53:e9:fd:0a:71:03:4d:3d:8e:b7:
-         a2:66:42:8e:b1:62:30:43:2f:a0:e0:f8:18:88:3e:59:5d:92:
-         28:bb:10:d2
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQ0WhcN
-MjIxMDMxMDA0NzQ0WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7Sf3e0o8F+FP4BVQiIslUTEupw
-gd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEgGiDmzGzlAGkwqD4FO5JotJMg
-I0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTySheyMYpcgSvOuN4z8keCNg2uaRZ
-IOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1ogbQqCUL4WHM2uWz6G7rzXI0/
-skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1NmTDCtrJOAKPQx8CYYH0HMNp
-BbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAosa5wV/RA2XO/SVGWx5K
-JkQNBn6zKuLRODdkm1UwpkdSZpYoXw2ds6YPPbxV9gFeitCcPZQUho38Fg1LYrNW
-9UsDPC2HAYSxrvCinTLC2NuqLsPj7NSu+41l37hGVG4WTjPXPqet33e0mqz1eYVn
-3SsGvZl98sgG6ADD1RqUb0gpEocmoN+yx5W0hIAwZQPBt4nfAWdGH9AcJTL/Gjr7
-5m4p3cQYm2AnROp+Bim4AELv02WIl77vYWivlzok1JfZ38GL94+CMLOSKNv9OLjc
-M/uh5Q6PB+4xv8kAjmO/Fq9T6f0KcQNNPY63omZCjrFiMEMvoOD4GIg+WV2SKLsQ
-0g==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/07a45775.0 b/src/test/setup/radius-config/freeradius/certs_2/07a45775.0
deleted file mode 120000
index 799a1c6..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/07a45775.0
+++ /dev/null
@@ -1 +0,0 @@
-client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0 b/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0
deleted file mode 120000
index 799a1c6..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0
+++ /dev/null
@@ -1 +0,0 @@
-client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/34e00910.0 b/src/test/setup/radius-config/freeradius/certs_2/34e00910.0
deleted file mode 120000
index 55f0c91..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/34e00910.0
+++ /dev/null
@@ -1 +0,0 @@
-01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/865470fd.0 b/src/test/setup/radius-config/freeradius/certs_2/865470fd.0
deleted file mode 120000
index e375f5a..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/865470fd.0
+++ /dev/null
@@ -1 +0,0 @@
-ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0 b/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0
deleted file mode 120000
index 55f0c91..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0
+++ /dev/null
@@ -1 +0,0 @@
-01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/Makefile b/src/test/setup/radius-config/freeradius/certs_2/Makefile
deleted file mode 100644
index c8f0892..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/Makefile
+++ /dev/null
@@ -1,140 +0,0 @@
-######################################################################
-#
-#	Make file to be installed in /etc/raddb/certs to enable
-#	the easy creation of certificates.
-#
-#	See the README file in this directory for more information.
-#
-#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
-#
-######################################################################
-
-DH_KEY_SIZE	= 1024
-
-#
-#  Set the passwords
-#
-PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
-
-USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
-CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
-
-######################################################################
-#
-#  Make the necessary files, but not client certificates.
-#
-######################################################################
-.PHONY: all
-all: index.txt serial dh random server ca client
-
-.PHONY: client
-client: client.pem
-
-.PHONY: ca
-ca: ca.der
-
-.PHONY: server
-server: server.pem server.vrfy
-
-######################################################################
-#
-#  Diffie-Hellman parameters
-#
-######################################################################
-dh:
-	openssl dhparam -out dh $(DH_KEY_SIZE)
-
-######################################################################
-#
-#  Create a new self-signed CA certificate
-#
-######################################################################
-ca.key ca.pem: ca.cnf
-	@[ -f index.txt ] || $(MAKE) index.txt
-	@[ -f serial ] || $(MAKE) serial
-	openssl req -new -x509 -keyout ca.key -out ca.pem \
-		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
-
-ca.der: ca.pem
-	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
-
-######################################################################
-#
-#  Create a new server certificate, signed by the above CA.
-#
-######################################################################
-server.csr server.key: server.cnf
-	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
-
-server.crt: server.csr ca.key ca.pem
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
-
-server.p12: server.crt
-	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-server.pem: server.p12
-	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-.PHONY: server.vrfy
-server.vrfy: ca.pem
-	@openssl verify -CAfile ca.pem server.pem
-
-######################################################################
-#
-#  Create a new client certificate, signed by the the above server
-#  certificate.
-#
-######################################################################
-client.csr client.key: client.cnf
-	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-
-client.crt: client.csr ca.pem ca.key
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-
-client.p12: client.crt
-	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-
-client.pem: client.p12
-	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-	cp client.pem $(USER_NAME).pem
-
-.PHONY: client.vrfy
-client.vrfy: ca.pem client.pem
-	c_rehash .
-	openssl verify -CApath . client.pem
-
-######################################################################
-#
-#  Miscellaneous rules.
-#
-######################################################################
-index.txt:
-	@touch index.txt
-
-serial:
-	@echo '01' > serial
-
-random:
-	@if [ -c /dev/urandom ] ; then \
-		ln -sf /dev/urandom random; \
-	else \
-		date > ./random; \
-	fi
-
-print:
-	openssl x509 -text -in server.crt
-
-printca:
-	openssl x509 -text -in ca.pem
-
-clean:
-	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
-
-#
-#	Make a target that people won't run too often.
-#
-destroycerts:
-	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
-			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig b/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig
deleted file mode 100644
index c8f0892..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig
+++ /dev/null
@@ -1,140 +0,0 @@
-######################################################################
-#
-#	Make file to be installed in /etc/raddb/certs to enable
-#	the easy creation of certificates.
-#
-#	See the README file in this directory for more information.
-#
-#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
-#
-######################################################################
-
-DH_KEY_SIZE	= 1024
-
-#
-#  Set the passwords
-#
-PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
-
-USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
-CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
-
-######################################################################
-#
-#  Make the necessary files, but not client certificates.
-#
-######################################################################
-.PHONY: all
-all: index.txt serial dh random server ca client
-
-.PHONY: client
-client: client.pem
-
-.PHONY: ca
-ca: ca.der
-
-.PHONY: server
-server: server.pem server.vrfy
-
-######################################################################
-#
-#  Diffie-Hellman parameters
-#
-######################################################################
-dh:
-	openssl dhparam -out dh $(DH_KEY_SIZE)
-
-######################################################################
-#
-#  Create a new self-signed CA certificate
-#
-######################################################################
-ca.key ca.pem: ca.cnf
-	@[ -f index.txt ] || $(MAKE) index.txt
-	@[ -f serial ] || $(MAKE) serial
-	openssl req -new -x509 -keyout ca.key -out ca.pem \
-		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
-
-ca.der: ca.pem
-	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
-
-######################################################################
-#
-#  Create a new server certificate, signed by the above CA.
-#
-######################################################################
-server.csr server.key: server.cnf
-	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
-
-server.crt: server.csr ca.key ca.pem
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
-
-server.p12: server.crt
-	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-server.pem: server.p12
-	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-.PHONY: server.vrfy
-server.vrfy: ca.pem
-	@openssl verify -CAfile ca.pem server.pem
-
-######################################################################
-#
-#  Create a new client certificate, signed by the the above server
-#  certificate.
-#
-######################################################################
-client.csr client.key: client.cnf
-	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-
-client.crt: client.csr ca.pem ca.key
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-
-client.p12: client.crt
-	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-
-client.pem: client.p12
-	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-	cp client.pem $(USER_NAME).pem
-
-.PHONY: client.vrfy
-client.vrfy: ca.pem client.pem
-	c_rehash .
-	openssl verify -CApath . client.pem
-
-######################################################################
-#
-#  Miscellaneous rules.
-#
-######################################################################
-index.txt:
-	@touch index.txt
-
-serial:
-	@echo '01' > serial
-
-random:
-	@if [ -c /dev/urandom ] ; then \
-		ln -sf /dev/urandom random; \
-	else \
-		date > ./random; \
-	fi
-
-print:
-	openssl x509 -text -in server.crt
-
-printca:
-	openssl x509 -text -in ca.pem
-
-clean:
-	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
-
-#
-#	Make a target that people won't run too often.
-#
-destroycerts:
-	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
-			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs_2/README b/src/test/setup/radius-config/freeradius/certs_2/README
deleted file mode 100644
index f7e0591..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/README
+++ /dev/null
@@ -1,226 +0,0 @@
-  This directory contains scripts to create the server certificates.
-To make a set of default (i.e. test) certificates, simply type:
-
-$ ./bootstrap
-
-  The "openssl" command will be run against the sample configuration
-files included here, and will make a self-signed certificate authority
-(i.e. root CA), and a server certificate.  This "root CA" should be
-installed on any client machine needing to do EAP-TLS, PEAP, or
-EAP-TTLS.
-
-  The Microsoft "XP Extensions" will be automatically included in the
-server certificate.  Without those extensions Windows clients will
-refuse to authenticate to FreeRADIUS.
-
-  The root CA and the "XP Extensions" file also contain a crlDistributionPoints
-attribute. The latest release of Windows Phone needs this to be present
-for the handset to validate the RADIUS server certificate. The RADIUS
-server must have the URI defined but the CA need not have...however it
-is best practice for a CA to have a revocation URI. Note that whilst
-the Windows Mobile client cannot actually use the CRL when doing 802.1X
-it is recommended that the URI be an actual working URL and contain a
-revocation format file as there may be other OS behaviour at play and
-future OSes that may do something with that URI.
-
-  In general, you should use self-signed certificates for 802.1x (EAP)
-authentication.  When you list root CAs from other organisations in
-the "ca_file", you permit them to masquerade as you, to authenticate
-your users, and to issue client certificates for EAP-TLS.
-
-  If FreeRADIUS was configured to use OpenSSL, then simply starting
-the server in root in debugging mode should also create test
-certificates, i.e.:
-
-$ radiusd -X
-
-  That will cause the EAP-TLS module to run the "bootstrap" script in
-this directory.  The script will be executed only once, the first time
-the server has been installed on a particular machine.  This bootstrap
-script SHOULD be run on installation of any pre-built binary package
-for your OS.  In any case, the script will ensure that it is not run
-twice, and that it does not over-write any existing certificates.
-
-  If you already have CA and server certificates, rename (or delete)
-this directory, and create a new "certs" directory containing your
-certificates.  Note that the "make install" command will NOT
-over-write your existing "raddb/certs" directory, which means that the
-"bootstrap" command will not be run.
-
-
-		NEW INSTALLATIONS OF FREERADIUS
-
-
-  We suggest that new installations use the test certificates for
-initial tests, and then create real certificates to use for normal
-user authentication.  See the instructions below for how to create the
-various certificates.  The old test certificates can be deleted by
-running the following command:
-
-$ rm -f *.pem *.der *.csr *.crt *.key *.p12 serial* index.txt*
-
-  Then, follow the instructions below for creating real certificates.
-
-  Once the final certificates have been created, you can delete the
-"bootstrap" command from this directory, and delete the
-"make_cert_command" configuration from the "tls" sub-section of
-eap.conf.
-
-  If you do not want to enable EAP-TLS, PEAP, or EAP-TTLS, then delete
-the relevant sub-sections from the "eap.conf" file.
-
-
-		MAKING A ROOT CERTIFICATE
-
-
-$ vi ca.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the CA certificate.
-
-  Edit the [certificate_authority] section to have the correct values
-  for your country, state, etc.
-
-$ make ca.pem
-
-  This step creates the CA certificate.
-
-$ make ca.der
-
-  This step creates the DER format of the self-signed certificate,
-  which is can be imported into Windows.
-
-
-		MAKING A SERVER CERTIFICATE
-
-
-$ vi server.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the server certificate.
-
-  Edit the [server] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  different from the commonName for the CA certificate.
-
-$ make server.pem
-
-  This step creates the server certificate.
-
-  If you have an existing certificate authority, and wish to create a
-  certificate signing request for the server certificate, edit
-  server.cnf as above, and type the following command.
-
-$ make server.csr
-
-  You will have to ensure that the certificate contains the XP
-  extensions needed by Microsoft clients.
-
-
-		MAKING A CLIENT CERTIFICATE
-
-
-  Client certificates are used by EAP-TLS, and optionally by EAP-TTLS
-and PEAP.  The following steps outline how to create a client
-certificate that is signed by the server certificate created above.
-You will have to have the password for the server certificate in the
-"input_password" and "output_password" fields of the server.cnf file.
-
-
-$ vi client.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the client certificate.  You will have to give these
-  passwords to the end user who will be using the certificates.
-
-  Edit the [client] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  the User-Name that will be used for logins!
-
-$ make client.pem
-
-  The users certificate will be in "emailAddress.pem",
-  i.e. "user@example.com.pem".
-
-  To create another client certificate, just repeat the steps for
-  making a client certificate, being sure to enter a different login
-  name for "commonName", and a different password.
-
-
-		PERFORMANCE
-
-
-  EAP performance for EAP-TLS, TTLS, and PEAP is dominated by SSL
-  calculations.  That is, a normal system can handle PAP
-  authentication at a rate of 10k packets/s.  However, SSL involves
-  RSA calculations, which are very expensive.  To benchmark your system,
-  do:
-
-$ openssl speed rsa
-
-  or
-
-$ openssl speed rsa2048
-
-  to test 2048 bit keys.
-
-  A 1GHz system will likely do 30 calculations/s.  A 2GHz system may
-  do 50 calculations/s, or more.  That number is also the number of
-  authentications/s that can be done for EAP-TLS (or TTLS, or PEAP).
-
-
-		COMPATIBILITY
-
-The certificates created using this method are known to be compatible
-with ALL operating systems.  Some common issues are:
-
-  - Windows requires certain OIDs in the certificates.  If it doesn't
-    see them, it will stop doing EAP.  The most visible effect is
-    that the client starts EAP, gets a few Access-Challenge packets,
-    and then a little while later re-starts EAP.  If this happens, see
-    the FAQ, and the comments in raddb/eap.conf for how to fix it.
-
-  - Windows requires the root certificates to be on the client PC.
-    If it doesn't have them, you will see the same issue as above.
-
-  - Windows XP post SP2 has a bug where it has problems with
-    certificate chains.  i.e. if the server certificate is an
-    intermediate one, and not a root one, then authentication will
-    silently fail, as above.
-
-  - Some versions of Windows CE cannot handle 4K RSA certificates.
-    They will (again) silently fail, as above.
-
-  - In none of these cases will Windows give the end user any
-    reasonable error message describing what went wrong.  This leads
-    people to blame the RADIUS server.  That blame is misplaced.
-
-  - Certificate chains of more than 64K bytes are known to not work.
-    This is a problem in FreeRADIUS.  However, most clients cannot
-    handle 64K certificate chains.  Most Access Points will shut down
-    the EAP session after about 50 round trips, while 64K certificate
-    chains will take about 60 round trips.  So don't use large
-    certificate chains.  They will only work after everyone upgrade
-    everything in the network.
-
-  - All other operating systems are known to work with EAP and
-    FreeRADIUS.  This includes Linux, *BSD, Mac OS X, Solaris,
-    Symbian, along with all known embedded systems, phones, WiFi
-    devices, etc.
-
-  - Someone needs to ask Microsoft to please stop making life hard for
-    their customers.
-
-
-		SECURITY CONSIDERATIONS
-
-The default certificate configuration files uses MD5 for message
-digests, to maintain compatibility with network equipment that
-supports only this algorithm.
-
-MD5 has known weaknesses and is discouraged in favour of SHA1 (see
-http://www.kb.cert.org/vuls/id/836068 for details). If your network
-equipment supports the SHA1 signature algorithm, we recommend that you
-change the "ca.cnf", "server.cnf", and "client.cnf" files to specify
-the use of SHA1 for the certificates. To do this, change the
-'default_md' entry in those files from 'md5' to 'sha1'.
diff --git a/src/test/setup/radius-config/freeradius/certs_2/bootstrap b/src/test/setup/radius-config/freeradius/certs_2/bootstrap
deleted file mode 100755
index 82f93ec..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/bootstrap
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/sh
-#
-#  This is a wrapper script to create default certificates when the
-#  server first starts in debugging mode.  Once the certificates have been
-#  created, this file should be deleted.
-#
-#  Ideally, this program should be run as part of the installation of any
-#  binary package.  The installation should also ensure that the permissions
-#  and owners are correct for the files generated by this script.
-#
-#  $Id: c9d939beac8d5bdc21ea1ff9233442f9ab933297 $
-#
-umask 027
-cd `dirname $0`
-
-make -h > /dev/null 2>&1
-
-#
-#  If we have a working "make", then use it.  Otherwise, run the commands
-#  manually.
-#
-if [ "$?" = "0" ]; then
-  make all
-  exit $?
-fi
-
-#
-#  The following commands were created by running "make -n", and edited
-#  to remove the trailing backslash, and to add "exit 1" after the commands.
-#
-#  Don't edit the following text.  Instead, edit the Makefile, and
-#  re-generate these commands.
-#
-if [ ! -f dh ]; then
-  openssl dhparam -out dh 1024 || exit 1
-  if [ -e /dev/urandom ] ; then
-	ln -sf /dev/urandom random
-  else
-	date > ./random;
-  fi
-fi
-
-if [ ! -f server.key ]; then
-  openssl req -new  -out server.csr -keyout server.key -config ./server.cnf || exit 1
-fi
-
-if [ ! -f ca.key ]; then
-  openssl req -new -x509 -keyout ca.key -out ca.pem -days `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'` -config ./ca.cnf || exit 1
-fi
-
-if [ ! -f index.txt ]; then
-  touch index.txt
-fi
-
-if [ ! -f serial ]; then
-  echo '01' > serial
-fi
-
-if [ ! -f server.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf || exit 1
-fi
-
-if [ ! -f server.p12 ]; then
-  openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-fi
-
-if [ ! -f server.pem ]; then
-  openssl pkcs12 -in server.p12 -out server.pem -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-  openssl verify -CAfile ca.pem server.pem || exit 1
-fi
-
-if [ ! -f ca.der ]; then
-  openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der || exit 1
-fi
-
-if [ ! -f client.key ]; then
-  openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-fi
-
-if [ ! -f client.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-fi
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.cnf b/src/test/setup/radius-config/freeradius/certs_2/ca.cnf
deleted file mode 100644
index 949695c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/ca.cnf
+++ /dev/null
@@ -1,62 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 2060
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= optional
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= certificate_authority
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-x509_extensions		= v3_ca
-
-[certificate_authority]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Certificate Authority"
-
-[v3_ca]
-subjectKeyIdentifier	= hash
-authorityKeyIdentifier	= keyid:always,issuer:always
-basicConstraints	= CA:true
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.der b/src/test/setup/radius-config/freeradius/certs_2/ca.der
deleted file mode 100644
index 9e4cb56..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/ca.der
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.key b/src/test/setup/radius-config/freeradius/certs_2/ca.key
deleted file mode 100644
index 15e7721..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/ca.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAtiRW+dx95F6X9qlfLmI4OVrkfuxSlTtr6nBPud6mGaMoOHQY
-9f+3kZTCCQHXNAASPQg+jFkh9IAZIU/+apNQraybup6sCuKHQTeQFGmcEolkH/iv
-U5NnTgKy5FBAk6pwjSZklZQoRxqE3jJm9HLF6IJ4UWhzQ3DvzsxAeEP2VlOolSgC
-9q61R7UYKMbjoNLgzOlY0aSqfn9kLyVDX3wBx4zB/aklG45CDlw/Cnrn50o0tQwu
-JJRXsmm+YCih/gwe0ZoHEocRJnaM+I1NPa4oj/srEfc+v9y1+3dtxJfZ3irj0Ha1
-0QhHMLRqeSYKvEVyr0JvcwZo27BYC2Td84Yq6wIDAQABAoIBAF+LTHyhJwDGhhyD
-2EPyjd+4SCg0O1XA5+kApTGxE5xbM9v5OqbAT0D0sZwb9RF4ABk32+evUdp537TT
-y3eIc0fHRaFtXn4POjgBt3idtZsbqwQCZaYHqqcHT2PonRdYztWcECNOppo11QDh
-m5QbKKnA1vCNQ/laotDt39u2et/o+/KsJRLigBkNfMILFddb9mYCizf9wV6rJF3T
-RtlVc5TW7+sz+u1vf6p2iIWZhEyofkMOyRPknnNzvZSRRbNL6A2errilYTfssnMH
-X6KP/qyOA+9pTkO/kI+HqVTsSjCTQN+zPZIyZcz4zBkG9yAj6JJqm0NSNCaU/td+
-sgchxcECgYEA2uw/A6bJafIxtZS+W1vUw/Kb80C3SKRlB5z9tT0zKu3F/tB5f1gA
-hbcu/6vLqv12bJi2KFaeGnv8JfMBrSHkHNL9LuMkprBlCNPWbZdc3KIz998eu80N
-xSrKRTQOwkDsjxBKRMy++BXO7fEPSLm+BsVS7GYw973KwmMEs+XqP3ECgYEA1P1m
-Gjs97npcuT7cKosB2kuHVlOLgjJqhjSJyTnMeU3V6wegSL65ni6XVvWzpagCE/NP
-7in1r3zqmgSFUW6iYCNojT9N9+wkiDYYmSFxWkCFFdGJ94ozENAE+j4DzeavrDn8
-DwnMDowrhXvWtzvcO7eDX9uhZNbCFVlH849gGhsCgYAl8o15tI91LeYv83iLnX9j
-98nQgIdEsDpS94OCxvj3l/+aC8kDqd1tUDN1XfIP0Z+Vc2Mf2uPM83Dhox3HRPil
-hm0ck4Lm4eyhexbQKEyQcg1QCfzp0MFyS9iD5mIoidh3c/trsZHort8Pn5lmocuu
-oMWe9JfIWb2rtD48cL4KAQKBgAdgl47FCooeFuQbt0iJ9BB5mUB2OFjUIjhvzW34
-4Io7FfPEGdsrMAa4mmlarVRJk3jZbqVTMr1XWFDpYyzE5j7qOw7ZaHvz4/gL16JG
-NETqAzIURfEbClXEvsdkKoGRQfh3MbHbpwPB+yfueCt0mVZALv68/sQdZC+VHhbj
-mnKNAoGAYarTY8XI0tHRhgM/y6C7wrOnNI32FmyvQo6lVzXKU0Da/g9UDwgl2sNy
-L+RG/iIh7wDBfskwTWQ6G2nrZlmFSQQDt8Yxmbmdf2c8AqD7mJM2PdWmkt/wwJNi
-YMqDvxcEfuLOa8uOrox1y4qLw2XlkV+7s52najQ+F2tZjwWcBZw=
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.pem b/src/test/setup/radius-config/freeradius/certs_2/ca.pem
deleted file mode 100644
index cdda0f5..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/ca.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAN3OagiHm6AXMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECAwCQ0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UE
-CgwKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDDB1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNzAzMTEw
-MDQ3NDNaFw0yMjEwMzEwMDQ3NDNaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECAwC
-Q0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UECgwKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDDB1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBALYkVvncfeRel/apXy5iODla5H7sUpU7a+pwT7nephmjKDh0GPX/t5GUwgkB
-1zQAEj0IPoxZIfSAGSFP/mqTUK2sm7qerArih0E3kBRpnBKJZB/4r1OTZ04CsuRQ
-QJOqcI0mZJWUKEcahN4yZvRyxeiCeFFoc0Nw787MQHhD9lZTqJUoAvautUe1GCjG
-46DS4MzpWNGkqn5/ZC8lQ198AceMwf2pJRuOQg5cPwp65+dKNLUMLiSUV7JpvmAo
-of4MHtGaBxKHESZ2jPiNTT2uKI/7KxH3Pr/ctft3bcSX2d4q49B2tdEIRzC0ankm
-CrxFcq9Cb3MGaNuwWAtk3fOGKusCAwEAAaOCASwwggEoMB0GA1UdDgQWBBRtf8rH
-zJW7rliW1eZnbVbSb3obfDCBwAYDVR0jBIG4MIG1gBRtf8rHzJW7rliW1eZnbVbS
-b3obfKGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
-DAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDdzmoIh5ugFzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAKWjORcBc1WK3r8mq88ipUC2UR1qvxdON4K/hd+rdAj0E/xA
-QCJDORKno8f2MktqLfhU0amCVBvwdfmVFmVDtl38b1pu+mNFO+FDp04039Fd5ThM
-iYmiQjnJ2IcAi/CILtrjURvJUPSOX9lviOtcla0HW94dgA9IDRs5frrWO9jkcxXR
-+oz3LNMfVnXqhoHHQ1RtvqOozhEsUZZWY5MuUxRY25peeZ7m1vz+zDa/DbrV1wsP
-dxOocmYdGFIAT9AiRnR4Jc/hqabBVNMZlGAA+2dELajpaHqb4yx5gBLVkT7VgHjI
-7cp7jLRL7T+i4orZiAXpeEpAeOrP8r0DYTJi/8A=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.cnf b/src/test/setup/radius-config/freeradius/certs_2/client.cnf
deleted file mode 100644
index 16686e1..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.cnf
+++ /dev/null
@@ -1,53 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 2060
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= client
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-
-[client]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= user@ciena.com
-commonName		= user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.crt b/src/test/setup/radius-config/freeradius/certs_2/client.crt
deleted file mode 100644
index a35ea0c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.crt
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 6 (0x6)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:47:44 2017 GMT
-            Not After : Oct 31 00:47:44 2022 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:e4:f1:8b:9f:93:52:e6:a8:b1:48:7b:7f:fe:14:
-                    f6:e9:cb:57:7a:87:08:2e:d2:7f:77:b4:a3:c1:7e:
-                    14:fe:01:55:08:88:b2:55:13:12:ea:70:81:dd:d5:
-                    92:25:40:87:15:fb:e0:e7:37:c3:b5:c5:f7:c9:ac:
-                    2b:1e:dd:4d:eb:69:41:a6:4f:66:01:20:1a:20:e6:
-                    cc:6c:e5:00:69:30:a8:3e:05:3b:92:68:b4:93:20:
-                    23:4a:e9:71:8c:3e:33:fc:fe:58:35:60:27:1d:c8:
-                    f9:51:b2:0b:a9:45:2a:12:09:59:39:53:c9:28:5e:
-                    c8:c6:29:72:04:af:3a:e3:78:cf:c9:1e:08:d8:36:
-                    b9:a4:59:20:eb:c8:7f:a6:94:09:31:7b:7e:9d:87:
-                    0b:fe:3a:f3:fd:d3:e9:1f:d5:b9:82:85:35:da:6c:
-                    c9:4d:68:81:b4:2a:09:42:f8:58:73:36:b9:6c:fa:
-                    1b:ba:f3:5c:8d:3f:b2:49:0a:7a:6a:06:a2:e1:70:
-                    fb:42:37:c9:d3:e9:1a:98:96:2b:83:69:3c:a1:da:
-                    c7:87:51:19:a3:b5:36:64:c3:0a:da:c9:38:02:8f:
-                    43:1f:02:61:81:f4:1c:c3:69:05:b6:2f:89:d4:e7:
-                    1c:1c:58:50:1a:d6:36:9c:ce:27:fc:60:a2:96:b0:
-                    03:c9
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         a2:c6:b9:c1:5f:d1:03:65:ce:fd:25:46:5b:1e:4a:26:44:0d:
-         06:7e:b3:2a:e2:d1:38:37:64:9b:55:30:a6:47:52:66:96:28:
-         5f:0d:9d:b3:a6:0f:3d:bc:55:f6:01:5e:8a:d0:9c:3d:94:14:
-         86:8d:fc:16:0d:4b:62:b3:56:f5:4b:03:3c:2d:87:01:84:b1:
-         ae:f0:a2:9d:32:c2:d8:db:aa:2e:c3:e3:ec:d4:ae:fb:8d:65:
-         df:b8:46:54:6e:16:4e:33:d7:3e:a7:ad:df:77:b4:9a:ac:f5:
-         79:85:67:dd:2b:06:bd:99:7d:f2:c8:06:e8:00:c3:d5:1a:94:
-         6f:48:29:12:87:26:a0:df:b2:c7:95:b4:84:80:30:65:03:c1:
-         b7:89:df:01:67:46:1f:d0:1c:25:32:ff:1a:3a:fb:e6:6e:29:
-         dd:c4:18:9b:60:27:44:ea:7e:06:29:b8:00:42:ef:d3:65:88:
-         97:be:ef:61:68:af:97:3a:24:d4:97:d9:df:c1:8b:f7:8f:82:
-         30:b3:92:28:db:fd:38:b8:dc:33:fb:a1:e5:0e:8f:07:ee:31:
-         bf:c9:00:8e:63:bf:16:af:53:e9:fd:0a:71:03:4d:3d:8e:b7:
-         a2:66:42:8e:b1:62:30:43:2f:a0:e0:f8:18:88:3e:59:5d:92:
-         28:bb:10:d2
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQ0WhcN
-MjIxMDMxMDA0NzQ0WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7Sf3e0o8F+FP4BVQiIslUTEupw
-gd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEgGiDmzGzlAGkwqD4FO5JotJMg
-I0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTySheyMYpcgSvOuN4z8keCNg2uaRZ
-IOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1ogbQqCUL4WHM2uWz6G7rzXI0/
-skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1NmTDCtrJOAKPQx8CYYH0HMNp
-BbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAosa5wV/RA2XO/SVGWx5K
-JkQNBn6zKuLRODdkm1UwpkdSZpYoXw2ds6YPPbxV9gFeitCcPZQUho38Fg1LYrNW
-9UsDPC2HAYSxrvCinTLC2NuqLsPj7NSu+41l37hGVG4WTjPXPqet33e0mqz1eYVn
-3SsGvZl98sgG6ADD1RqUb0gpEocmoN+yx5W0hIAwZQPBt4nfAWdGH9AcJTL/Gjr7
-5m4p3cQYm2AnROp+Bim4AELv02WIl77vYWivlzok1JfZ38GL94+CMLOSKNv9OLjc
-M/uh5Q6PB+4xv8kAjmO/Fq9T6f0KcQNNPY63omZCjrFiMEMvoOD4GIg+WV2SKLsQ
-0g==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.csr b/src/test/setup/radius-config/freeradius/certs_2/client.csr
deleted file mode 100644
index db3b62a..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.csr
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICwDCCAagCAQAwezELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
-DAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5hIEluYy4xHTAbBgkqhkiG9w0BCQEW
-DnVzZXJAY2llbmEuY29tMRcwFQYDVQQDDA51c2VyQGNpZW5hLmNvbTCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7S
-f3e0o8F+FP4BVQiIslUTEupwgd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEg
-GiDmzGzlAGkwqD4FO5JotJMgI0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTyShe
-yMYpcgSvOuN4z8keCNg2uaRZIOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1o
-gbQqCUL4WHM2uWz6G7rzXI0/skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1
-NmTDCtrJOAKPQx8CYYH0HMNpBbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaAA
-MA0GCSqGSIb3DQEBCwUAA4IBAQAqO0ff9oJZ+6sljU5yyws8+SeMB4eCA1/hmqcD
-/pRHmAflF3/gAIKGmntj+VaiaPPoBYhvoXJEBeLGmGZMjmwFNWIow++MlrsXv2/K
-HtLz8NOKbm6f6ILkptAGg/QsxxaAKNygA/h2UZhe3lw//obSgJsrbRU1mMWo1H+j
-qgmYCjRfy8N4lPr+0sZ+0mW3p6kBZjmjBQoKFXk7bv6AMbLplBKc1mFGHVsD7jlN
-FKNt8fkxmURKRNd7InI7q/7JM2Q/z4lVUj2cmlzYtVTOXgAV51+wH34oLKKIgq/u
-qivRN8O8adk50sjXOkstLQJUVbhkNhI9oNnC/GVL0HOfljl3
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.key b/src/test/setup/radius-config/freeradius/certs_2/client.key
deleted file mode 100644
index 2b35f89..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEA5PGLn5NS5qixSHt//hT26ctXeocILtJ/d7SjwX4U/gFVCIiy
-VRMS6nCB3dWSJUCHFfvg5zfDtcX3yawrHt1N62lBpk9mASAaIObMbOUAaTCoPgU7
-kmi0kyAjSulxjD4z/P5YNWAnHcj5UbILqUUqEglZOVPJKF7IxilyBK8643jPyR4I
-2Da5pFkg68h/ppQJMXt+nYcL/jrz/dPpH9W5goU12mzJTWiBtCoJQvhYcza5bPob
-uvNcjT+ySQp6agai4XD7QjfJ0+kamJYrg2k8odrHh1EZo7U2ZMMK2sk4Ao9DHwJh
-gfQcw2kFti+J1OccHFhQGtY2nM4n/GCilrADyQIDAQABAoIBAF+xepvDl3Yj8p4K
-SPBp0N7eCH2FbW0svOzLC1t4GMwmwGUlxex7YX+ucQnJGCIL+6q7skDS9THIQo8A
-MLHg7I3GXBNowokb4u/3qGCnw2k0Vk4+H61NqJSKbVYFh1mIMnC/2xzMSO1RMKzu
-D6O77h7F245zr/P40lDJyAefOq0S6sgZqxmHmoRTHhp0tXV4mkzV7P7RqoJrvAiG
-tGMk5OfdoILnNfoeXNC50nw201UK7xhrrqqlAWZRAaUZJtsb1GxW+jOra6OtsCVg
-AKS/FxzUGMsoqluM5mHLBEN69DAvBBs8g7UVKdmCDZC+feJ31eAnPAoc1gxBHdQU
-pCnA8dECgYEA+Q6X80gnoyG0g66Gh62R7RgDLYPpgkZimLeoX49mwizAUkeSf/Mj
-raVajRmJ8J1n4UklHdQe0PE9Jhuxo4Uo9sP71ZqpQPEvN35/Sw0xxQHcwxD73SWa
-UEVsnWIDJ6QrkoBOhjDMM6tyDSPVDS23352E6sZ9EU45qWvncb5OTdUCgYEA61Np
-Qs/CpEWtPG8IiEPKPEWUEwoO8SS6C4R/UfXNC96GhfIpA4Uy3fQwTUtHEMPL+7lh
-SPFPQDBH90jOTYg30EfHiBMlKW4l21XS+PotTP3ktqZMgx06SnoM2a/+crpzFqkb
-i4eAPCsdTispElbtqleLuUbFO9aG3jHMsK2RtCUCgYB04G9YYL0RJgkTXryNQVvg
-ussK+gOD+kncxxtBtQcRCnU6Z5INb2mH3LgwzRJZk1SjeXLsm5XWkc8Tah2j0iKW
-IwS0if7xlf2Felx8OPXpMOWLuRWpAzN2hg3hkZRPbxBvkLzI5m99s/Ay0GTz6UeH
-reEpV/prO519r0COtTMD/QKBgCdRinbVS8oysh002BIccX/ciD8eIRz9a/BctQE2
-tonTJlre+SdTbApVsHRZrYgJjt2CPdT4LKum5X9VtNKTfe/Y7me3+y+O7dhV4Kgk
-9Mi2ay5xXrtReNnUxqzgkP0OVghlPOr1OuHSulTDNVuRFqitc/UC9BVpZKNfYrnq
-ZjvZAoGBALzgzXajgIdhghCt5PFLkhh3xyOliTXWFstHcMZdQF2wQyeF/uQ2zrC/
-2t1Sa+egV3QDUPYzW9YLQs9eaLh7MS9wCHLY2SMElAqYiNjRfkT4wWdPfeyFx4+E
-Euwtu+lPJ7sEpNu5jX63OS2AeZsQYlsT0Ai+lB4TeyoE6Pj04iC0
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.p12 b/src/test/setup/radius-config/freeradius/certs_2/client.p12
deleted file mode 100644
index e2a0a6c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.pem b/src/test/setup/radius-config/freeradius/certs_2/client.pem
deleted file mode 100644
index 2c48dd8..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/client.pem
+++ /dev/null
@@ -1,57 +0,0 @@
-Bag Attributes
-    localKeyID: 24 9D AF D0 5D 7E 89 BC B6 71 93 BC 6E E9 D7 DC 5A AB F1 24
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQ0WhcN
-MjIxMDMxMDA0NzQ0WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7Sf3e0o8F+FP4BVQiIslUTEupw
-gd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEgGiDmzGzlAGkwqD4FO5JotJMg
-I0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTySheyMYpcgSvOuN4z8keCNg2uaRZ
-IOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1ogbQqCUL4WHM2uWz6G7rzXI0/
-skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1NmTDCtrJOAKPQx8CYYH0HMNp
-BbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAosa5wV/RA2XO/SVGWx5K
-JkQNBn6zKuLRODdkm1UwpkdSZpYoXw2ds6YPPbxV9gFeitCcPZQUho38Fg1LYrNW
-9UsDPC2HAYSxrvCinTLC2NuqLsPj7NSu+41l37hGVG4WTjPXPqet33e0mqz1eYVn
-3SsGvZl98sgG6ADD1RqUb0gpEocmoN+yx5W0hIAwZQPBt4nfAWdGH9AcJTL/Gjr7
-5m4p3cQYm2AnROp+Bim4AELv02WIl77vYWivlzok1JfZ38GL94+CMLOSKNv9OLjc
-M/uh5Q6PB+4xv8kAjmO/Fq9T6f0KcQNNPY63omZCjrFiMEMvoOD4GIg+WV2SKLsQ
-0g==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 24 9D AF D0 5D 7E 89 BC B6 71 93 BC 6E E9 D7 DC 5A AB F1 24
-Key Attributes: <No Attributes>
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEA5PGLn5NS5qixSHt//hT26ctXeocILtJ/d7SjwX4U/gFVCIiy
-VRMS6nCB3dWSJUCHFfvg5zfDtcX3yawrHt1N62lBpk9mASAaIObMbOUAaTCoPgU7
-kmi0kyAjSulxjD4z/P5YNWAnHcj5UbILqUUqEglZOVPJKF7IxilyBK8643jPyR4I
-2Da5pFkg68h/ppQJMXt+nYcL/jrz/dPpH9W5goU12mzJTWiBtCoJQvhYcza5bPob
-uvNcjT+ySQp6agai4XD7QjfJ0+kamJYrg2k8odrHh1EZo7U2ZMMK2sk4Ao9DHwJh
-gfQcw2kFti+J1OccHFhQGtY2nM4n/GCilrADyQIDAQABAoIBAF+xepvDl3Yj8p4K
-SPBp0N7eCH2FbW0svOzLC1t4GMwmwGUlxex7YX+ucQnJGCIL+6q7skDS9THIQo8A
-MLHg7I3GXBNowokb4u/3qGCnw2k0Vk4+H61NqJSKbVYFh1mIMnC/2xzMSO1RMKzu
-D6O77h7F245zr/P40lDJyAefOq0S6sgZqxmHmoRTHhp0tXV4mkzV7P7RqoJrvAiG
-tGMk5OfdoILnNfoeXNC50nw201UK7xhrrqqlAWZRAaUZJtsb1GxW+jOra6OtsCVg
-AKS/FxzUGMsoqluM5mHLBEN69DAvBBs8g7UVKdmCDZC+feJ31eAnPAoc1gxBHdQU
-pCnA8dECgYEA+Q6X80gnoyG0g66Gh62R7RgDLYPpgkZimLeoX49mwizAUkeSf/Mj
-raVajRmJ8J1n4UklHdQe0PE9Jhuxo4Uo9sP71ZqpQPEvN35/Sw0xxQHcwxD73SWa
-UEVsnWIDJ6QrkoBOhjDMM6tyDSPVDS23352E6sZ9EU45qWvncb5OTdUCgYEA61Np
-Qs/CpEWtPG8IiEPKPEWUEwoO8SS6C4R/UfXNC96GhfIpA4Uy3fQwTUtHEMPL+7lh
-SPFPQDBH90jOTYg30EfHiBMlKW4l21XS+PotTP3ktqZMgx06SnoM2a/+crpzFqkb
-i4eAPCsdTispElbtqleLuUbFO9aG3jHMsK2RtCUCgYB04G9YYL0RJgkTXryNQVvg
-ussK+gOD+kncxxtBtQcRCnU6Z5INb2mH3LgwzRJZk1SjeXLsm5XWkc8Tah2j0iKW
-IwS0if7xlf2Felx8OPXpMOWLuRWpAzN2hg3hkZRPbxBvkLzI5m99s/Ay0GTz6UeH
-reEpV/prO519r0COtTMD/QKBgCdRinbVS8oysh002BIccX/ciD8eIRz9a/BctQE2
-tonTJlre+SdTbApVsHRZrYgJjt2CPdT4LKum5X9VtNKTfe/Y7me3+y+O7dhV4Kgk
-9Mi2ay5xXrtReNnUxqzgkP0OVghlPOr1OuHSulTDNVuRFqitc/UC9BVpZKNfYrnq
-ZjvZAoGBALzgzXajgIdhghCt5PFLkhh3xyOliTXWFstHcMZdQF2wQyeF/uQ2zrC/
-2t1Sa+egV3QDUPYzW9YLQs9eaLh7MS9wCHLY2SMElAqYiNjRfkT4wWdPfeyFx4+E
-Euwtu+lPJ7sEpNu5jX63OS2AeZsQYlsT0Ai+lB4TeyoE6Pj04iC0
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/dh b/src/test/setup/radius-config/freeradius/certs_2/dh
deleted file mode 100644
index e7b4f90..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/dh
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN DH PARAMETERS-----
-MIGHAoGBAKHERxCGYaLWD6ay09DuGxxs5whd4zFUS1pjA7jEvGwnbISSzGvzRbYi
-ymNeNgzrZhHiWo5GC008yLvUy0qxVMny0x+7xybup+mOv6ITEz+HuhlsBN+Aqc5P
-Oyq7h1qnuy8UiiEP87YcwhCFooQ3I8dCcMT7AVApYex4K81Sck/LAgEC
------END DH PARAMETERS-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0 b/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0
deleted file mode 120000
index e375f5a..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0
+++ /dev/null
@@ -1 +0,0 @@
-ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt b/src/test/setup/radius-config/freeradius/certs_2/index.txt
deleted file mode 100644
index 26b614b..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/index.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	170306185336Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-V	180306004618Z		03	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	180306004619Z		04	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-V	221031004743Z		05	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	221031004744Z		06	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr
deleted file mode 100644
index 3a7e39e..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = no
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old
deleted file mode 100644
index 3a7e39e..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = no
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.old b/src/test/setup/radius-config/freeradius/certs_2/index.txt.old
deleted file mode 100644
index f9870bc..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/index.txt.old
+++ /dev/null
@@ -1,5 +0,0 @@
-V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	170306185336Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-V	180306004618Z		03	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	180306004619Z		04	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-V	221031004743Z		05	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/serial b/src/test/setup/radius-config/freeradius/certs_2/serial
deleted file mode 100644
index 2c7456e..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/serial
+++ /dev/null
@@ -1 +0,0 @@
-07
diff --git a/src/test/setup/radius-config/freeradius/certs_2/serial.old b/src/test/setup/radius-config/freeradius/certs_2/serial.old
deleted file mode 100644
index cd672a5..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/serial.old
+++ /dev/null
@@ -1 +0,0 @@
-06
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.cnf b/src/test/setup/radius-config/freeradius/certs_2/server.cnf
deleted file mode 100644
index d1f4c7b..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.cnf
+++ /dev/null
@@ -1,54 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/server.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/server.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 2060
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= optional
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= server
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-
-[server]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Server Certificate"
-
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.crt b/src/test/setup/radius-config/freeradius/certs_2/server.crt
deleted file mode 100644
index f206dde..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.crt
+++ /dev/null
@@ -1,80 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 5 (0x5)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Mar 11 00:47:43 2017 GMT
-            Not After : Oct 31 00:47:43 2022 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:cb:b3:5b:bc:a3:20:e3:79:6d:a0:45:d9:cc:6d:
-                    54:4a:53:d0:72:66:92:b7:00:35:43:9c:a0:99:5c:
-                    94:f0:65:d3:c1:a1:e8:4b:5f:19:57:dc:9a:e3:52:
-                    ba:61:98:e6:a2:73:8b:e1:72:0b:53:e5:50:35:ab:
-                    58:ec:04:ff:b0:78:ab:f6:61:d0:8a:c8:43:af:c2:
-                    fe:43:26:20:4c:78:cd:01:b7:d0:70:d4:2d:f3:c8:
-                    1b:c7:84:aa:be:57:6b:49:b0:f8:66:db:ec:2c:68:
-                    05:9c:8f:2d:e2:a3:a3:be:f6:8b:a9:d3:f4:01:96:
-                    d2:76:1e:1b:a7:b5:87:a5:ed:b6:a6:2e:50:76:05:
-                    11:84:92:b0:d8:70:46:1f:3c:ee:07:fc:ca:45:dd:
-                    ca:df:f7:0b:27:05:6d:4d:ce:02:39:49:92:c3:87:
-                    f8:44:76:e2:b4:9c:2e:00:18:8d:0b:8f:9a:8c:7f:
-                    cd:99:81:d2:1c:d2:f7:48:5b:12:4d:40:ef:4c:cf:
-                    3b:ea:f4:6e:60:c3:a4:a7:df:fb:0f:1b:39:75:7f:
-                    60:0a:d6:d7:9a:9a:f0:80:aa:b6:d5:da:cc:08:64:
-                    80:a2:dd:0a:01:1e:6f:9b:19:49:c5:97:b6:2e:6d:
-                    c5:8c:98:4c:13:a4:15:6a:d5:fc:66:cf:17:83:0e:
-                    bf:fb
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         a0:f6:b8:4d:6f:f5:5e:ae:04:98:c8:44:cd:24:4d:d9:f1:a2:
-         f5:c1:ac:97:d5:d9:30:e3:f8:e0:40:97:ac:bf:71:61:fa:c1:
-         7c:c6:7b:09:36:76:12:a1:9d:3a:91:c0:11:6b:35:6b:5b:32:
-         ec:72:af:84:20:c0:a8:81:50:20:aa:98:92:52:20:c6:a4:97:
-         00:7a:4c:70:ba:b5:c4:25:46:7e:c5:46:48:92:cc:e0:6b:d7:
-         a7:2c:25:77:f9:d3:11:e0:a0:b5:02:bb:c0:43:98:dd:4e:5c:
-         57:d7:f4:25:5b:a5:15:f2:db:23:62:e0:f7:20:66:e4:6d:f8:
-         10:73:43:22:df:80:25:7e:6e:ce:13:26:56:96:1d:39:f0:00:
-         0e:a7:8d:82:b9:7a:8a:7a:17:7e:7b:eb:19:df:26:36:a0:e4:
-         71:fd:09:ac:43:42:92:df:a2:f8:2b:4f:51:28:9d:8e:0c:ac:
-         3c:d5:ec:b8:97:89:57:f7:c8:bb:d3:3e:67:95:66:85:27:69:
-         7a:1e:fe:f5:a3:f9:df:c5:52:44:21:89:ed:c4:bb:20:d9:37:
-         80:fa:c3:f2:44:22:5f:fa:ea:5d:0c:59:10:07:d8:fa:91:e1:
-         9c:98:4e:c3:d4:ae:0e:5a:f4:6b:c4:c6:4a:dd:b7:d5:a3:a7:
-         c0:72:fd:04
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQzWhcN
-MjIxMDMxMDA0NzQzWjB0MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xIzAhBgNVBAMMGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDLs1u8oyDjeW2gRdnMbVRKU9ByZpK3ADVDnKCZ
-XJTwZdPBoehLXxlX3JrjUrphmOaic4vhcgtT5VA1q1jsBP+weKv2YdCKyEOvwv5D
-JiBMeM0Bt9Bw1C3zyBvHhKq+V2tJsPhm2+wsaAWcjy3io6O+9oup0/QBltJ2Hhun
-tYel7bamLlB2BRGEkrDYcEYfPO4H/MpF3crf9wsnBW1NzgI5SZLDh/hEduK0nC4A
-GI0Lj5qMf82ZgdIc0vdIWxJNQO9Mzzvq9G5gw6Sn3/sPGzl1f2AK1teamvCAqrbV
-2swIZICi3QoBHm+bGUnFl7YubcWMmEwTpBVq1fxmzxeDDr/7AgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAKD2
-uE1v9V6uBJjIRM0kTdnxovXBrJfV2TDj+OBAl6y/cWH6wXzGewk2dhKhnTqRwBFr
-NWtbMuxyr4QgwKiBUCCqmJJSIMaklwB6THC6tcQlRn7FRkiSzOBr16csJXf50xHg
-oLUCu8BDmN1OXFfX9CVbpRXy2yNi4PcgZuRt+BBzQyLfgCV+bs4TJlaWHTnwAA6n
-jYK5eop6F3576xnfJjag5HH9CaxDQpLfovgrT1EonY4MrDzV7LiXiVf3yLvTPmeV
-ZoUnaXoe/vWj+d/FUkQhie3EuyDZN4D6w/JEIl/66l0MWRAH2PqR4ZyYTsPUrg5a
-9GvExkrdt9Wjp8By/QQ=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.csr b/src/test/setup/radius-config/freeradius/certs_2/server.csr
deleted file mode 100644
index 207f644..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.csr
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICzjCCAbYCAQAwgYgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UE
-BwwJU29tZXdoZXJlMRMwEQYDVQQKDApDaWVuYSBJbmMuMR4wHAYJKoZIhvcNAQkB
-Fg9hZG1pbkBjaWVuYS5jb20xIzAhBgNVBAMMGkV4YW1wbGUgU2VydmVyIENlcnRp
-ZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy7NbvKMg43lt
-oEXZzG1USlPQcmaStwA1Q5ygmVyU8GXTwaHoS18ZV9ya41K6YZjmonOL4XILU+VQ
-NatY7AT/sHir9mHQishDr8L+QyYgTHjNAbfQcNQt88gbx4SqvldrSbD4ZtvsLGgF
-nI8t4qOjvvaLqdP0AZbSdh4bp7WHpe22pi5QdgURhJKw2HBGHzzuB/zKRd3K3/cL
-JwVtTc4COUmSw4f4RHbitJwuABiNC4+ajH/NmYHSHNL3SFsSTUDvTM876vRuYMOk
-p9/7Dxs5dX9gCtbXmprwgKq21drMCGSAot0KAR5vmxlJxZe2Lm3FjJhME6QVatX8
-Zs8Xgw6/+wIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAA6Y5lkWVd/otgQ4mphq
-5vQWpWuAUcGbp+9pDn6xxjJ4eFMXoFAAk+gqnDXrlAxlC+60Mt32lAOSmqvFe+7s
-a2nmOiGjacJW8YSXL5rIIeaRukKYly9GvvXjCuVVNyQXTd6/Ltx4636E9I/kwXyT
-+DEMIRzq1bY6TYPvTjN0RtPC+cPkjR1OyRRiCbn+yg8lsWm7JqBLzAaoHJxtP96P
-CsUyegPLiUqwFdzuJEHDV56/iFYDwo4ZRu2KJeWcv5fGXCqGYuilbPoySFXZDhAb
-2U1i6ZRu2srWj/noIbHP6hk8LA+VycI5baJY5JEbPiyL1TowLB/4UzwfaKk+Y5Rs
-Trw=
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.key b/src/test/setup/radius-config/freeradius/certs_2/server.key
deleted file mode 100644
index 93e9d6c..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAy7NbvKMg43ltoEXZzG1USlPQcmaStwA1Q5ygmVyU8GXTwaHo
-S18ZV9ya41K6YZjmonOL4XILU+VQNatY7AT/sHir9mHQishDr8L+QyYgTHjNAbfQ
-cNQt88gbx4SqvldrSbD4ZtvsLGgFnI8t4qOjvvaLqdP0AZbSdh4bp7WHpe22pi5Q
-dgURhJKw2HBGHzzuB/zKRd3K3/cLJwVtTc4COUmSw4f4RHbitJwuABiNC4+ajH/N
-mYHSHNL3SFsSTUDvTM876vRuYMOkp9/7Dxs5dX9gCtbXmprwgKq21drMCGSAot0K
-AR5vmxlJxZe2Lm3FjJhME6QVatX8Zs8Xgw6/+wIDAQABAoIBAHsyGoVX04g/5EFO
-fY7ZgGUDk8ncp1buVUhy7d5S0/EgZJVzKl6LQXSWA0cAvaBRLQuK/kp3AsrWoVAU
-NU9pJorAMKBuQAXvYKBoOga6hEY7uSEU3oeei7wohkSAxBMiI80JrvHmiEvDDcHG
-BmZPubTAv28StTxQzzCIvQkNDOEYcuo4WCAMLZnGYbVrnotmqkL1aOMWvBls4ELR
-pBjWTKGVergONIvGY/Vjjzr9nAQ7qUSJoONABmNo26FZVpytFT3JDwHgFeyg9ws5
-M0J+27sX/lCZzAK9TbSEhQoh+Ig7lyzNT59VOCYlkzioJeWqQzrlOuHAiCDGKtT8
-RUoC/AECgYEA8oXAQXp9d4+jMbkZdV67RpRUwc+1FgMp71b+xiSivZowdC488ZlC
-JlyeaXyApEFooCNBU/L9qq2vsEfG+sJUeaeJ2xJ6UQ/eaKIlXdlPZM3ftiBjUQVl
-10TUfyewezajgSBmSeK5N7e+He0z3zDskOrN8yl5IIEpuabpunDHT4ECgYEA1wVO
-zwnDS7CM1e38ShsTlfoNfQ1ZNEe59DWY6Ze6TrlIAhpdG1L1zZpdYRwk7HjgisAi
-dfNK0Lw1chS5PjsLI6b8svTZjPh+snMCheINDMgm4CPN8dIk2G1znuZ3Y/Ls5U9v
-uHemdI2SjYoJRXMNqk2rGejUAmqGBv+ccTVCDXsCgYEAjiFksvseXNuJQLLCAM8r
-4gk8Qvl/nw7rnKDXXzhJR+T9UJlojg7Lfb/59shIe/r5XiBLQO+/Si/QLvdh3cPV
-zhYp7IG/vZIXPisIyx94MBOXcB07F1iafraP8pVIEG3NFPfv+o0tD0JX4SIKrFr3
-yVZSrJc5rZsE4RC5GdLOfAECgYAzytmC0U0zbCFgXmTIzODY3VRmbNKx3x0Rj993
-VM7Rq5uGtib7L+55Sdd4TrkuJKehhEpIswOwZ8VP52H/dWVnfw+bY5MLe8e44C8Z
-ZKvUCloi3zz6JK4vW53gBo93ico6Kfo1Kszzgi5s3XUCP4ZpITA3m3eF5XbHNMbQ
-Fg4FBQKBgBpuUJbB7m2MT8MlJCla840Y4+Iv3+rcdU4IWXZLQ3RsxPPSFb0XPSC2
-q7TH3E8ABtUKW+UmDyC8IuZzndL0F0VFMq6KnkYAEKZM0yrxVPDbYMGn5w5mYx5w
-ZF5NKrl4JGDKObSNqPcPDKPgRTRCcCkdHqPMJvV6kvA1xPXfc31j
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.p12 b/src/test/setup/radius-config/freeradius/certs_2/server.p12
deleted file mode 100644
index 64339c3..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.pem b/src/test/setup/radius-config/freeradius/certs_2/server.pem
deleted file mode 100644
index 60e7675..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/server.pem
+++ /dev/null
@@ -1,57 +0,0 @@
-Bag Attributes
-    localKeyID: C7 9B 3E 44 92 2B 08 79 45 B3 53 B2 F3 6D B3 E4 F1 C0 A3 CF
-subject=/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQzWhcN
-MjIxMDMxMDA0NzQzWjB0MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xIzAhBgNVBAMMGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDLs1u8oyDjeW2gRdnMbVRKU9ByZpK3ADVDnKCZ
-XJTwZdPBoehLXxlX3JrjUrphmOaic4vhcgtT5VA1q1jsBP+weKv2YdCKyEOvwv5D
-JiBMeM0Bt9Bw1C3zyBvHhKq+V2tJsPhm2+wsaAWcjy3io6O+9oup0/QBltJ2Hhun
-tYel7bamLlB2BRGEkrDYcEYfPO4H/MpF3crf9wsnBW1NzgI5SZLDh/hEduK0nC4A
-GI0Lj5qMf82ZgdIc0vdIWxJNQO9Mzzvq9G5gw6Sn3/sPGzl1f2AK1teamvCAqrbV
-2swIZICi3QoBHm+bGUnFl7YubcWMmEwTpBVq1fxmzxeDDr/7AgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAKD2
-uE1v9V6uBJjIRM0kTdnxovXBrJfV2TDj+OBAl6y/cWH6wXzGewk2dhKhnTqRwBFr
-NWtbMuxyr4QgwKiBUCCqmJJSIMaklwB6THC6tcQlRn7FRkiSzOBr16csJXf50xHg
-oLUCu8BDmN1OXFfX9CVbpRXy2yNi4PcgZuRt+BBzQyLfgCV+bs4TJlaWHTnwAA6n
-jYK5eop6F3576xnfJjag5HH9CaxDQpLfovgrT1EonY4MrDzV7LiXiVf3yLvTPmeV
-ZoUnaXoe/vWj+d/FUkQhie3EuyDZN4D6w/JEIl/66l0MWRAH2PqR4ZyYTsPUrg5a
-9GvExkrdt9Wjp8By/QQ=
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: C7 9B 3E 44 92 2B 08 79 45 B3 53 B2 F3 6D B3 E4 F1 C0 A3 CF
-Key Attributes: <No Attributes>
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAy7NbvKMg43ltoEXZzG1USlPQcmaStwA1Q5ygmVyU8GXTwaHo
-S18ZV9ya41K6YZjmonOL4XILU+VQNatY7AT/sHir9mHQishDr8L+QyYgTHjNAbfQ
-cNQt88gbx4SqvldrSbD4ZtvsLGgFnI8t4qOjvvaLqdP0AZbSdh4bp7WHpe22pi5Q
-dgURhJKw2HBGHzzuB/zKRd3K3/cLJwVtTc4COUmSw4f4RHbitJwuABiNC4+ajH/N
-mYHSHNL3SFsSTUDvTM876vRuYMOkp9/7Dxs5dX9gCtbXmprwgKq21drMCGSAot0K
-AR5vmxlJxZe2Lm3FjJhME6QVatX8Zs8Xgw6/+wIDAQABAoIBAHsyGoVX04g/5EFO
-fY7ZgGUDk8ncp1buVUhy7d5S0/EgZJVzKl6LQXSWA0cAvaBRLQuK/kp3AsrWoVAU
-NU9pJorAMKBuQAXvYKBoOga6hEY7uSEU3oeei7wohkSAxBMiI80JrvHmiEvDDcHG
-BmZPubTAv28StTxQzzCIvQkNDOEYcuo4WCAMLZnGYbVrnotmqkL1aOMWvBls4ELR
-pBjWTKGVergONIvGY/Vjjzr9nAQ7qUSJoONABmNo26FZVpytFT3JDwHgFeyg9ws5
-M0J+27sX/lCZzAK9TbSEhQoh+Ig7lyzNT59VOCYlkzioJeWqQzrlOuHAiCDGKtT8
-RUoC/AECgYEA8oXAQXp9d4+jMbkZdV67RpRUwc+1FgMp71b+xiSivZowdC488ZlC
-JlyeaXyApEFooCNBU/L9qq2vsEfG+sJUeaeJ2xJ6UQ/eaKIlXdlPZM3ftiBjUQVl
-10TUfyewezajgSBmSeK5N7e+He0z3zDskOrN8yl5IIEpuabpunDHT4ECgYEA1wVO
-zwnDS7CM1e38ShsTlfoNfQ1ZNEe59DWY6Ze6TrlIAhpdG1L1zZpdYRwk7HjgisAi
-dfNK0Lw1chS5PjsLI6b8svTZjPh+snMCheINDMgm4CPN8dIk2G1znuZ3Y/Ls5U9v
-uHemdI2SjYoJRXMNqk2rGejUAmqGBv+ccTVCDXsCgYEAjiFksvseXNuJQLLCAM8r
-4gk8Qvl/nw7rnKDXXzhJR+T9UJlojg7Lfb/59shIe/r5XiBLQO+/Si/QLvdh3cPV
-zhYp7IG/vZIXPisIyx94MBOXcB07F1iafraP8pVIEG3NFPfv+o0tD0JX4SIKrFr3
-yVZSrJc5rZsE4RC5GdLOfAECgYAzytmC0U0zbCFgXmTIzODY3VRmbNKx3x0Rj993
-VM7Rq5uGtib7L+55Sdd4TrkuJKehhEpIswOwZ8VP52H/dWVnfw+bY5MLe8e44C8Z
-ZKvUCloi3zz6JK4vW53gBo93ico6Kfo1Kszzgi5s3XUCP4ZpITA3m3eF5XbHNMbQ
-Fg4FBQKBgBpuUJbB7m2MT8MlJCla840Y4+Iv3+rcdU4IWXZLQ3RsxPPSFb0XPSC2
-q7TH3E8ABtUKW+UmDyC8IuZzndL0F0VFMq6KnkYAEKZM0yrxVPDbYMGn5w5mYx5w
-ZF5NKrl4JGDKObSNqPcPDKPgRTRCcCkdHqPMJvV6kvA1xPXfc31j
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem b/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem
deleted file mode 100644
index 2eb9ece..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem
+++ /dev/null
@@ -1,60 +0,0 @@
-Bag Attributes
-    localKeyID: 24 9D AF D0 5D 7E 89 BC B6 71 93 BC 6E E9 D7 DC 5A AB F1 24 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQ0WhcN
-MjIxMDMxMDA0NzQ0WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7Sf3e0o8F+FP4BVQiIslUTEupw
-gd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEgGiDmzGzlAGkwqD4FO5JotJMg
-I0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTySheyMYpcgSvOuN4z8keCNg2uaRZ
-IOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1ogbQqCUL4WHM2uWz6G7rzXI0/
-skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1NmTDCtrJOAKPQx8CYYH0HMNp
-BbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAosa5wV/RA2XO/SVGWx5K
-JkQNBn6zKuLRODdkm1UwpkdSZpYoXw2ds6YPPbxV9gFeitCcPZQUho38Fg1LYrNW
-9UsDPC2HAYSxrvCinTLC2NuqLsPj7NSu+41l37hGVG4WTjPXPqet33e0mqz1eYVn
-3SsGvZl98sgG6ADD1RqUb0gpEocmoN+yx5W0hIAwZQPBt4nfAWdGH9AcJTL/Gjr7
-5m4p3cQYm2AnROp+Bim4AELv02WIl77vYWivlzok1JfZ38GL94+CMLOSKNv9OLjc
-M/uh5Q6PB+4xv8kAjmO/Fq9T6f0KcQNNPY63omZCjrFiMEMvoOD4GIg+WV2SKLsQ
-0g==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 24 9D AF D0 5D 7E 89 BC B6 71 93 BC 6E E9 D7 DC 5A AB F1 24 
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQICNM5WlsHCScCAggA
-MBQGCCqGSIb3DQMHBAjL/GAZA/7jzwSCBMi+H6LtWzJO1Jp0SLWt0ACrP5C6vNdI
-3iqcnIvQDryQScTo4r0oNat/iL+uzACRr8/lJA5a+eIkGaQownojrlnq6LODjWiO
-zjJlh96ldA7R0CwQARMuH1tbM6637q70xTgHzU3UoHEvk9flrizTDfjMEyp62D+H
-2r+RJyNVEshSW5gHh1QqHgoSYq7ORnW1GmfeoCQVNF0UpP9yMrtS4wsqPffmAS27
-1cBskc77IsksbFLzw1NXT7pFb2NUC2/tP71EZGPR048Iwyts5O9x3v09E7yMMqYw
-1ukzd5Isvnw7w5CE4KQqeRnjdKC5rbRWn+rBMV+19paPlFscuLiXKHAt/y+HT59H
-yDgVoynT4omcdp9q/G20Y1Shnaceqg5LSE/+p4BjmUCuEjjaUzPQArC0syUfcADe
-N8zAas1CFlCcXxFbSxPDoR7y5+OFhD0PRVXM2bvz856/eXmYxZLgZFAqzCadDTOW
-kfQ/jQyweui0fTwAj9YbeWdGzRqMKUqa7voUAwD2l4wKNo14+O7SNYbx/3McbX4X
-tdJsnjJcB2RECNWmDc8JDy94Z0m1CGxE39iHxjxczye6IPsBA3ZtImQNaYC5gkJ/
-9483EtLFfkh+F8W8BrsUflF6Edtgla7JMOf+bJ14BzI460Bkp4JWx6srQjPKvHBM
-p771c46rlU+sgPYM9ZzYRf2D/qcTnlne0RXIUuyQExvx4kfWSl63OYFF68NiObLp
-VVbL4gpK99mcoA104XG0kKqRco0CC+PaaacUWotKI+QA+A5iSZyQtjdCXHh7AgUV
-VlpbnYMwmYZmRik/ePVl+DFZBGxglqeEn0Dwr4lDXAxfyjXufhSUhYkFKZ8RTpGl
-g6pOkSA4/0qSZIz3PqOEe1OdkUDqyu4wbzFPJEgiXIrt0HUPkEo1CFlEblwSeh8a
-jlCTxanG5Y0Bs/+OeOkT1rKTcngnjgg3hABpMBJadTxIF6fk9XMVzmsDMY3C6ZTe
-EgEMGmeojOI2P6isIgrJedq4AeP2gK1QO/nxU3T/qb4rfyIA0EyNoO1OEiXFw18J
-rdrkPqgH1UfvJ8AbpGxBZBPSmpNj5vlz8L5fWnb73t833p9zlrg13BF5R6qK0nqJ
-CGXLnoQKYfWFhI5eYMQNmll1jAbKwFlBEU4AOYdvYKDKwiSdfHxh+/eGi2Jqkftx
-25KAvl9dMO4c3qRdgkBr0ht59R7tuosBwmUstwDxyewa/NzRuVvkmAqw708F1jvj
-Arxwg4KzHSCphZ+G/781aLwwQfPcg/KXsyy8vPuZQD+ftguQLSdN1rwayqu03tKA
-/9VAVMg8/a3en798wrhJamlUK/rL4KbMh/+5XLqpIy22BCuZCjGjsyKGXbc3ncWC
-stfWZhq0KkmvwogCg08zH3cqB1Wmk/BI/7bmeBKZJz9uQ/s5T2RTGnNmhD0CDZY7
-57jwMF+yOpQI/ACPfIIJpejHTVeGyTAkrAu2PlYO6gh6Qx1JBzPSHFGjtfO5FjSm
-nqahmdyGqfk9yqeH5ZkEoLVRhV2ovj8ALPnVMVqhiYrmDyuoM+9gCyssBCp9RROM
-Zuo9cULAJmWVYTbN07natKbfgk08OssaFnwuldlsIbLm5MZDequye3Q+sLzB05/d
-z30=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/xpextensions b/src/test/setup/radius-config/freeradius/certs_2/xpextensions
deleted file mode 100644
index 8e4a9a2..0000000
--- a/src/test/setup/radius-config/freeradius/certs_2/xpextensions
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#  File containing the OIDs required for Windows.
-#
-#  http://support.microsoft.com/kb/814394/en-us
-#
-[ xpclient_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.2
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-[ xpserver_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.1
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-#
-#  Add this to the PKCS#7 keybag attributes holding the client's private key
-#  for machine authentication.
-#
-#  the presence of this OID tells Windows XP that the cert is intended
-#  for use by the computer itself, and not by an end-user.
-#
-#  The other solution is to use Microsoft's web certificate server
-#  to generate these certs.
-#
-# 1.3.6.1.4.1.311.17.2
diff --git a/src/test/setup/radius-config/freeradius/certs_3/01.pem b/src/test/setup/radius-config/freeradius/certs_3/01.pem
deleted file mode 100644
index ea92e54..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/01.pem
+++ /dev/null
@@ -1,70 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 1 (0x1)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Jun  6 21:12:27 2016 GMT
-            Not After : Jun  1 21:12:27 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:9e:ca:94:59:9c:35:4c:84:93:99:02:ec:7c:a4:
-                    60:4c:b4:60:97:89:01:9a:0e:45:4d:c5:69:71:de:
-                    b9:e8:b8:78:ee:be:49:bc:30:4f:7e:2c:00:48:8c:
-                    ed:36:b7:48:0e:7e:67:6e:ac:7f:ba:21:78:91:fe:
-                    64:a7:30:6e:9c:41:d3:1f:89:f6:1f:33:7c:1f:c4:
-                    34:c0:89:ba:cf:71:f9:8b:4b:d2:ef:e9:7b:df:0b:
-                    5b:04:8e:40:fb:cf:a4:08:b5:e4:ab:40:16:a5:47:
-                    bc:90:c8:04:fc:d8:f2:05:0a:27:a7:c4:6c:c2:9a:
-                    a2:3c:f8:c6:fe:ff:d7:67:3c:aa:99:15:c2:52:b3:
-                    8f:ff:77:58:3c:06:66:03:24:fd:ab:e1:a3:cb:a9:
-                    6d:f9:e5:37:21:02:23:49:5f:61:c5:2b:fd:75:ac:
-                    d5:2c:27:9d:7c:24:46:2b:4c:6d:01:bd:a8:51:2a:
-                    9d:d7:03:53:30:c6:52:07:4e:62:5c:aa:d0:57:28:
-                    30:17:e6:c0:2a:8b:86:49:97:85:ba:fc:cb:d0:b0:
-                    67:9b:a0:ee:3a:14:32:7a:fd:6a:9b:bb:f9:75:9c:
-                    a5:c3:ab:a2:64:f0:2b:5c:24:cc:df:d1:6a:42:8c:
-                    ca:7c:5e:06:96:59:79:d8:18:26:5e:b2:e3:b3:6b:
-                    8f:df
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         1d:65:7f:32:5b:2d:60:5d:17:ee:c5:e1:92:f2:cf:38:7b:f7:
-         cb:92:a2:5c:06:b2:bd:34:96:68:15:91:8c:85:92:f4:cc:af:
-         7a:b7:9c:10:2b:26:da:b6:5e:e4:66:01:8c:ad:9c:8f:bc:02:
-         9a:88:12:e2:2f:47:70:68:a5:b3:f1:df:6b:7f:82:d2:76:52:
-         fe:c0:2c:2c:cd:2d:26:2c:8a:52:f7:92:35:ce:50:5f:5b:26:
-         f0:bd:ef:ac:bc:fd:87:f7:87:37:d7:2b:56:9a:5a:14:b4:97:
-         b4:df:b4:95:c8:7b:76:49:a1:4b:5b:f7:10:4d:f1:b5:16:99:
-         f1:19
------BEGIN CERTIFICATE-----
-MIIDSTCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCeypRZnDVMhJOZAux8pGBMtGCXiQGaDkVNxWlx
-3rnouHjuvkm8ME9+LABIjO02t0gOfmdurH+6IXiR/mSnMG6cQdMfifYfM3wfxDTA
-ibrPcfmLS9Lv6XvfC1sEjkD7z6QIteSrQBalR7yQyAT82PIFCienxGzCmqI8+Mb+
-/9dnPKqZFcJSs4//d1g8BmYDJP2r4aPLqW355TchAiNJX2HFK/11rNUsJ518JEYr
-TG0BvahRKp3XA1MwxlIHTmJcqtBXKDAX5sAqi4ZJl4W6/MvQsGeboO46FDJ6/Wqb
-u/l1nKXDq6Jk8CtcJMzf0WpCjMp8XgaWWXnYGCZesuOza4/fAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAHWV/
-MlstYF0X7sXhkvLPOHv3y5KiXAayvTSWaBWRjIWS9MyverecECsm2rZe5GYBjK2c
-j7wCmogS4i9HcGils/Hfa3+C0nZS/sAsLM0tJiyKUveSNc5QX1sm8L3vrLz9h/eH
-N9crVppaFLSXtN+0lch7dkmhS1v3EE3xtRaZ8Rk=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/02.pem b/src/test/setup/radius-config/freeradius/certs_3/02.pem
deleted file mode 100644
index fd66778..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/02.pem
+++ /dev/null
@@ -1,58 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 2 (0x2)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Jun  6 21:12:27 2016 GMT
-            Not After : Jun  1 21:12:27 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (1024 bit)
-                Modulus:
-                    00:c2:f5:e2:4b:36:fd:2d:9e:9c:ee:e3:73:89:47:
-                    ca:be:81:ce:ef:0b:bf:ba:21:42:e5:85:29:5d:b9:
-                    95:1a:e1:99:8b:36:d5:ae:7c:b4:c6:74:7c:e4:37:
-                    de:fb:d4:78:76:26:a7:b1:f0:e1:22:1c:ce:52:5d:
-                    57:8c:dd:d8:0d:e4:92:f4:e7:85:e5:85:8d:34:4f:
-                    17:0e:19:73:d9:dd:eb:57:36:8d:ea:12:21:76:8b:
-                    41:91:48:e0:ad:47:b0:8d:38:39:38:54:77:d5:01:
-                    32:1b:7b:fc:c5:1d:c2:2e:08:84:f7:14:04:2e:36:
-                    5b:48:0d:3b:a4:3e:fd:ce:e5
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         c0:8e:4a:d8:ea:d0:c2:86:62:9b:be:bf:30:e8:3b:bf:b7:cb:
-         c7:8d:30:a8:08:8a:1c:1d:33:74:ab:35:8e:79:bd:58:b0:01:
-         97:f3:df:93:ad:62:2e:3b:57:45:c9:87:7e:67:42:82:3c:32:
-         81:e6:3f:f2:82:69:7d:35:af:80:92:54:98:01:52:48:8e:f9:
-         73:5c:a6:6b:39:a3:e6:85:9a:83:b9:f8:be:ad:75:ad:8b:fb:
-         ad:56:a6:38:54:c5:b6:f8:72:82:9d:7a:77:ee:a5:9e:b8:52:
-         c6:c9:1d:79:d7:d6:35:77:a1:7f:e5:7c:ea:9a:f6:f0:51:1b:
-         84:ba
------BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/Makefile b/src/test/setup/radius-config/freeradius/certs_3/Makefile
deleted file mode 100644
index c8f0892..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/Makefile
+++ /dev/null
@@ -1,140 +0,0 @@
-######################################################################
-#
-#	Make file to be installed in /etc/raddb/certs to enable
-#	the easy creation of certificates.
-#
-#	See the README file in this directory for more information.
-#
-#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
-#
-######################################################################
-
-DH_KEY_SIZE	= 1024
-
-#
-#  Set the passwords
-#
-PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
-
-USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
-CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
-
-######################################################################
-#
-#  Make the necessary files, but not client certificates.
-#
-######################################################################
-.PHONY: all
-all: index.txt serial dh random server ca client
-
-.PHONY: client
-client: client.pem
-
-.PHONY: ca
-ca: ca.der
-
-.PHONY: server
-server: server.pem server.vrfy
-
-######################################################################
-#
-#  Diffie-Hellman parameters
-#
-######################################################################
-dh:
-	openssl dhparam -out dh $(DH_KEY_SIZE)
-
-######################################################################
-#
-#  Create a new self-signed CA certificate
-#
-######################################################################
-ca.key ca.pem: ca.cnf
-	@[ -f index.txt ] || $(MAKE) index.txt
-	@[ -f serial ] || $(MAKE) serial
-	openssl req -new -x509 -keyout ca.key -out ca.pem \
-		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
-
-ca.der: ca.pem
-	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
-
-######################################################################
-#
-#  Create a new server certificate, signed by the above CA.
-#
-######################################################################
-server.csr server.key: server.cnf
-	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
-
-server.crt: server.csr ca.key ca.pem
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
-
-server.p12: server.crt
-	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-server.pem: server.p12
-	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-.PHONY: server.vrfy
-server.vrfy: ca.pem
-	@openssl verify -CAfile ca.pem server.pem
-
-######################################################################
-#
-#  Create a new client certificate, signed by the the above server
-#  certificate.
-#
-######################################################################
-client.csr client.key: client.cnf
-	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-
-client.crt: client.csr ca.pem ca.key
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-
-client.p12: client.crt
-	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-
-client.pem: client.p12
-	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-	cp client.pem $(USER_NAME).pem
-
-.PHONY: client.vrfy
-client.vrfy: ca.pem client.pem
-	c_rehash .
-	openssl verify -CApath . client.pem
-
-######################################################################
-#
-#  Miscellaneous rules.
-#
-######################################################################
-index.txt:
-	@touch index.txt
-
-serial:
-	@echo '01' > serial
-
-random:
-	@if [ -c /dev/urandom ] ; then \
-		ln -sf /dev/urandom random; \
-	else \
-		date > ./random; \
-	fi
-
-print:
-	openssl x509 -text -in server.crt
-
-printca:
-	openssl x509 -text -in ca.pem
-
-clean:
-	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
-
-#
-#	Make a target that people won't run too often.
-#
-destroycerts:
-	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
-			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs_3/Makefile.orig b/src/test/setup/radius-config/freeradius/certs_3/Makefile.orig
deleted file mode 100644
index c8f0892..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/Makefile.orig
+++ /dev/null
@@ -1,140 +0,0 @@
-######################################################################
-#
-#	Make file to be installed in /etc/raddb/certs to enable
-#	the easy creation of certificates.
-#
-#	See the README file in this directory for more information.
-#
-#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
-#
-######################################################################
-
-DH_KEY_SIZE	= 1024
-
-#
-#  Set the passwords
-#
-PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
-PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
-
-USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
-CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
-
-######################################################################
-#
-#  Make the necessary files, but not client certificates.
-#
-######################################################################
-.PHONY: all
-all: index.txt serial dh random server ca client
-
-.PHONY: client
-client: client.pem
-
-.PHONY: ca
-ca: ca.der
-
-.PHONY: server
-server: server.pem server.vrfy
-
-######################################################################
-#
-#  Diffie-Hellman parameters
-#
-######################################################################
-dh:
-	openssl dhparam -out dh $(DH_KEY_SIZE)
-
-######################################################################
-#
-#  Create a new self-signed CA certificate
-#
-######################################################################
-ca.key ca.pem: ca.cnf
-	@[ -f index.txt ] || $(MAKE) index.txt
-	@[ -f serial ] || $(MAKE) serial
-	openssl req -new -x509 -keyout ca.key -out ca.pem \
-		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
-
-ca.der: ca.pem
-	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
-
-######################################################################
-#
-#  Create a new server certificate, signed by the above CA.
-#
-######################################################################
-server.csr server.key: server.cnf
-	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
-
-server.crt: server.csr ca.key ca.pem
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
-
-server.p12: server.crt
-	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-server.pem: server.p12
-	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
-
-.PHONY: server.vrfy
-server.vrfy: ca.pem
-	@openssl verify -CAfile ca.pem server.pem
-
-######################################################################
-#
-#  Create a new client certificate, signed by the the above server
-#  certificate.
-#
-######################################################################
-client.csr client.key: client.cnf
-	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-
-client.crt: client.csr ca.pem ca.key
-	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-
-client.p12: client.crt
-	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-
-client.pem: client.p12
-	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
-	cp client.pem $(USER_NAME).pem
-
-.PHONY: client.vrfy
-client.vrfy: ca.pem client.pem
-	c_rehash .
-	openssl verify -CApath . client.pem
-
-######################################################################
-#
-#  Miscellaneous rules.
-#
-######################################################################
-index.txt:
-	@touch index.txt
-
-serial:
-	@echo '01' > serial
-
-random:
-	@if [ -c /dev/urandom ] ; then \
-		ln -sf /dev/urandom random; \
-	else \
-		date > ./random; \
-	fi
-
-print:
-	openssl x509 -text -in server.crt
-
-printca:
-	openssl x509 -text -in ca.pem
-
-clean:
-	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
-
-#
-#	Make a target that people won't run too often.
-#
-destroycerts:
-	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
-			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs_3/README b/src/test/setup/radius-config/freeradius/certs_3/README
deleted file mode 100644
index f7e0591..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/README
+++ /dev/null
@@ -1,226 +0,0 @@
-  This directory contains scripts to create the server certificates.
-To make a set of default (i.e. test) certificates, simply type:
-
-$ ./bootstrap
-
-  The "openssl" command will be run against the sample configuration
-files included here, and will make a self-signed certificate authority
-(i.e. root CA), and a server certificate.  This "root CA" should be
-installed on any client machine needing to do EAP-TLS, PEAP, or
-EAP-TTLS.
-
-  The Microsoft "XP Extensions" will be automatically included in the
-server certificate.  Without those extensions Windows clients will
-refuse to authenticate to FreeRADIUS.
-
-  The root CA and the "XP Extensions" file also contain a crlDistributionPoints
-attribute. The latest release of Windows Phone needs this to be present
-for the handset to validate the RADIUS server certificate. The RADIUS
-server must have the URI defined but the CA need not have...however it
-is best practice for a CA to have a revocation URI. Note that whilst
-the Windows Mobile client cannot actually use the CRL when doing 802.1X
-it is recommended that the URI be an actual working URL and contain a
-revocation format file as there may be other OS behaviour at play and
-future OSes that may do something with that URI.
-
-  In general, you should use self-signed certificates for 802.1x (EAP)
-authentication.  When you list root CAs from other organisations in
-the "ca_file", you permit them to masquerade as you, to authenticate
-your users, and to issue client certificates for EAP-TLS.
-
-  If FreeRADIUS was configured to use OpenSSL, then simply starting
-the server in root in debugging mode should also create test
-certificates, i.e.:
-
-$ radiusd -X
-
-  That will cause the EAP-TLS module to run the "bootstrap" script in
-this directory.  The script will be executed only once, the first time
-the server has been installed on a particular machine.  This bootstrap
-script SHOULD be run on installation of any pre-built binary package
-for your OS.  In any case, the script will ensure that it is not run
-twice, and that it does not over-write any existing certificates.
-
-  If you already have CA and server certificates, rename (or delete)
-this directory, and create a new "certs" directory containing your
-certificates.  Note that the "make install" command will NOT
-over-write your existing "raddb/certs" directory, which means that the
-"bootstrap" command will not be run.
-
-
-		NEW INSTALLATIONS OF FREERADIUS
-
-
-  We suggest that new installations use the test certificates for
-initial tests, and then create real certificates to use for normal
-user authentication.  See the instructions below for how to create the
-various certificates.  The old test certificates can be deleted by
-running the following command:
-
-$ rm -f *.pem *.der *.csr *.crt *.key *.p12 serial* index.txt*
-
-  Then, follow the instructions below for creating real certificates.
-
-  Once the final certificates have been created, you can delete the
-"bootstrap" command from this directory, and delete the
-"make_cert_command" configuration from the "tls" sub-section of
-eap.conf.
-
-  If you do not want to enable EAP-TLS, PEAP, or EAP-TTLS, then delete
-the relevant sub-sections from the "eap.conf" file.
-
-
-		MAKING A ROOT CERTIFICATE
-
-
-$ vi ca.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the CA certificate.
-
-  Edit the [certificate_authority] section to have the correct values
-  for your country, state, etc.
-
-$ make ca.pem
-
-  This step creates the CA certificate.
-
-$ make ca.der
-
-  This step creates the DER format of the self-signed certificate,
-  which is can be imported into Windows.
-
-
-		MAKING A SERVER CERTIFICATE
-
-
-$ vi server.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the server certificate.
-
-  Edit the [server] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  different from the commonName for the CA certificate.
-
-$ make server.pem
-
-  This step creates the server certificate.
-
-  If you have an existing certificate authority, and wish to create a
-  certificate signing request for the server certificate, edit
-  server.cnf as above, and type the following command.
-
-$ make server.csr
-
-  You will have to ensure that the certificate contains the XP
-  extensions needed by Microsoft clients.
-
-
-		MAKING A CLIENT CERTIFICATE
-
-
-  Client certificates are used by EAP-TLS, and optionally by EAP-TTLS
-and PEAP.  The following steps outline how to create a client
-certificate that is signed by the server certificate created above.
-You will have to have the password for the server certificate in the
-"input_password" and "output_password" fields of the server.cnf file.
-
-
-$ vi client.cnf
-
-  Edit the "input_password" and "output_password" fields to be the
-  password for the client certificate.  You will have to give these
-  passwords to the end user who will be using the certificates.
-
-  Edit the [client] section to have the correct values for your
-  country, state, etc.  Be sure that the commonName field here is
-  the User-Name that will be used for logins!
-
-$ make client.pem
-
-  The users certificate will be in "emailAddress.pem",
-  i.e. "user@example.com.pem".
-
-  To create another client certificate, just repeat the steps for
-  making a client certificate, being sure to enter a different login
-  name for "commonName", and a different password.
-
-
-		PERFORMANCE
-
-
-  EAP performance for EAP-TLS, TTLS, and PEAP is dominated by SSL
-  calculations.  That is, a normal system can handle PAP
-  authentication at a rate of 10k packets/s.  However, SSL involves
-  RSA calculations, which are very expensive.  To benchmark your system,
-  do:
-
-$ openssl speed rsa
-
-  or
-
-$ openssl speed rsa2048
-
-  to test 2048 bit keys.
-
-  A 1GHz system will likely do 30 calculations/s.  A 2GHz system may
-  do 50 calculations/s, or more.  That number is also the number of
-  authentications/s that can be done for EAP-TLS (or TTLS, or PEAP).
-
-
-		COMPATIBILITY
-
-The certificates created using this method are known to be compatible
-with ALL operating systems.  Some common issues are:
-
-  - Windows requires certain OIDs in the certificates.  If it doesn't
-    see them, it will stop doing EAP.  The most visible effect is
-    that the client starts EAP, gets a few Access-Challenge packets,
-    and then a little while later re-starts EAP.  If this happens, see
-    the FAQ, and the comments in raddb/eap.conf for how to fix it.
-
-  - Windows requires the root certificates to be on the client PC.
-    If it doesn't have them, you will see the same issue as above.
-
-  - Windows XP post SP2 has a bug where it has problems with
-    certificate chains.  i.e. if the server certificate is an
-    intermediate one, and not a root one, then authentication will
-    silently fail, as above.
-
-  - Some versions of Windows CE cannot handle 4K RSA certificates.
-    They will (again) silently fail, as above.
-
-  - In none of these cases will Windows give the end user any
-    reasonable error message describing what went wrong.  This leads
-    people to blame the RADIUS server.  That blame is misplaced.
-
-  - Certificate chains of more than 64K bytes are known to not work.
-    This is a problem in FreeRADIUS.  However, most clients cannot
-    handle 64K certificate chains.  Most Access Points will shut down
-    the EAP session after about 50 round trips, while 64K certificate
-    chains will take about 60 round trips.  So don't use large
-    certificate chains.  They will only work after everyone upgrade
-    everything in the network.
-
-  - All other operating systems are known to work with EAP and
-    FreeRADIUS.  This includes Linux, *BSD, Mac OS X, Solaris,
-    Symbian, along with all known embedded systems, phones, WiFi
-    devices, etc.
-
-  - Someone needs to ask Microsoft to please stop making life hard for
-    their customers.
-
-
-		SECURITY CONSIDERATIONS
-
-The default certificate configuration files uses MD5 for message
-digests, to maintain compatibility with network equipment that
-supports only this algorithm.
-
-MD5 has known weaknesses and is discouraged in favour of SHA1 (see
-http://www.kb.cert.org/vuls/id/836068 for details). If your network
-equipment supports the SHA1 signature algorithm, we recommend that you
-change the "ca.cnf", "server.cnf", and "client.cnf" files to specify
-the use of SHA1 for the certificates. To do this, change the
-'default_md' entry in those files from 'md5' to 'sha1'.
diff --git a/src/test/setup/radius-config/freeradius/certs_3/bootstrap b/src/test/setup/radius-config/freeradius/certs_3/bootstrap
deleted file mode 100755
index 82f93ec..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/bootstrap
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/sh
-#
-#  This is a wrapper script to create default certificates when the
-#  server first starts in debugging mode.  Once the certificates have been
-#  created, this file should be deleted.
-#
-#  Ideally, this program should be run as part of the installation of any
-#  binary package.  The installation should also ensure that the permissions
-#  and owners are correct for the files generated by this script.
-#
-#  $Id: c9d939beac8d5bdc21ea1ff9233442f9ab933297 $
-#
-umask 027
-cd `dirname $0`
-
-make -h > /dev/null 2>&1
-
-#
-#  If we have a working "make", then use it.  Otherwise, run the commands
-#  manually.
-#
-if [ "$?" = "0" ]; then
-  make all
-  exit $?
-fi
-
-#
-#  The following commands were created by running "make -n", and edited
-#  to remove the trailing backslash, and to add "exit 1" after the commands.
-#
-#  Don't edit the following text.  Instead, edit the Makefile, and
-#  re-generate these commands.
-#
-if [ ! -f dh ]; then
-  openssl dhparam -out dh 1024 || exit 1
-  if [ -e /dev/urandom ] ; then
-	ln -sf /dev/urandom random
-  else
-	date > ./random;
-  fi
-fi
-
-if [ ! -f server.key ]; then
-  openssl req -new  -out server.csr -keyout server.key -config ./server.cnf || exit 1
-fi
-
-if [ ! -f ca.key ]; then
-  openssl req -new -x509 -keyout ca.key -out ca.pem -days `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'` -config ./ca.cnf || exit 1
-fi
-
-if [ ! -f index.txt ]; then
-  touch index.txt
-fi
-
-if [ ! -f serial ]; then
-  echo '01' > serial
-fi
-
-if [ ! -f server.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf || exit 1
-fi
-
-if [ ! -f server.p12 ]; then
-  openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-fi
-
-if [ ! -f server.pem ]; then
-  openssl pkcs12 -in server.p12 -out server.pem -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
-  openssl verify -CAfile ca.pem server.pem || exit 1
-fi
-
-if [ ! -f ca.der ]; then
-  openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der || exit 1
-fi
-
-if [ ! -f client.key ]; then
-  openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
-fi
-
-if [ ! -f client.crt ]; then
-  openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
-fi
diff --git a/src/test/setup/radius-config/freeradius/certs_3/ca.cnf b/src/test/setup/radius-config/freeradius/certs_3/ca.cnf
deleted file mode 100644
index 1235124..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/ca.cnf
+++ /dev/null
@@ -1,62 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= certificate_authority
-default_bits		= 1024
-input_password		= whatever
-output_password		= whatever
-x509_extensions		= v3_ca
-
-[certificate_authority]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Certificate Authority"
-
-[v3_ca]
-subjectKeyIdentifier	= hash
-authorityKeyIdentifier	= keyid:always,issuer:always
-basicConstraints	= CA:true
-crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
-
diff --git a/src/test/setup/radius-config/freeradius/certs_3/ca.der b/src/test/setup/radius-config/freeradius/certs_3/ca.der
deleted file mode 100644
index 1c35967..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/ca.der
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_3/ca.key b/src/test/setup/radius-config/freeradius/certs_3/ca.key
deleted file mode 100644
index 54943c2..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/ca.key
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXQIBAAKBgQDJUqpU2cd6KM+kFfLM/o4hxPkQqn4YQyM/CU2CuJA6ip0Zou/t
-54ck6voSoJ3y26rh74leAwM0uK7OA1VnmwgtdIYQ3Fy4FYVzxzdQykcXhQwB41zY
-23IpHQxlgiyzJbTYD4R4ykA7r8KusQTmkr+CtPvZr94TWZgp2JESDhIV2QIDAQAB
-AoGAZeg4XFcYyFdcM/0AOJAqNiHUyYDj7zgc4U2+NItkRp3fxBhcRxrJqABhsYgo
-K8TSDSgJatMvjkJmCdZaJuWtDx3sfNkUhQRsj+LK0A+HZ8+vxWnRYrgbeaZcD7QU
-LCPCKd2hpstrCW40218DjipjQ5rf4ZOyT4wJfPWE/YXPx5kCQQD+zyizdUGloFOS
-nlyk5udFhE/sp3Bgctg6U7UElhBulL1VqJL1p8PEfgX89aTzN7jhgC/wBOhcooRp
-fGmHT0HjAkEAykOEnYLdU6eK62e2n9TXjqmIb0FzNfnasfAA6jZXtS2scBbZ/Ae5
-VUvG+FDnCvQx/NGUVKF5KFIKYOHLxV2mEwJAJmfnBJWJ3YL61cCUyi4bcbjtqwfZ
-wk/NE42GXTiZJXG5z+1mqSwuL7GnCWelAxvE/AgsMYsr0rqUJqof5csmbQJBAJmk
-lj1sJiIguAc//lsFPEUThR82MYVD6ZuW+6ruYsuvCTkQMsAfQeqIKHmCQy9v9fTd
-Yvayvi0hvN6BRuAC1l8CQQCAvdyISjg0kPpJ/LhDcEcWLwk94jQc2Ek/8isDdylL
-5Ubk96pmEhaiAgdBp94tyaqw43IEqYqv80lE9KTLe3T5
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/ca.pem b/src/test/setup/radius-config/freeradius/certs_3/ca.pem
deleted file mode 100644
index 3f9f95f..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/ca.pem
+++ /dev/null
@@ -1,23 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDxDCCAy2gAwIBAgIJALONXjUjVRn0MA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
-ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjA2MDYy
-MTEyMjdaFw0xNzA2MDEyMTEyMjdaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
-Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
-yVKqVNnHeijPpBXyzP6OIcT5EKp+GEMjPwlNgriQOoqdGaLv7eeHJOr6EqCd8tuq
-4e+JXgMDNLiuzgNVZ5sILXSGENxcuBWFc8c3UMpHF4UMAeNc2NtyKR0MZYIssyW0
-2A+EeMpAO6/CrrEE5pK/grT72a/eE1mYKdiREg4SFdkCAwEAAaOCASwwggEoMB0G
-A1UdDgQWBBSIE0e45Nu8yfaKVWMWh33UOIH6UzCBwAYDVR0jBIG4MIG1gBSIE0e4
-5Nu8yfaKVWMWh33UOIH6U6GBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
-AkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAc
-BgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBD
-ZXJ0aWZpY2F0ZSBBdXRob3JpdHmCCQCzjV41I1UZ9DAMBgNVHRMEBTADAQH/MDYG
-A1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9j
-YS5jcmwwDQYJKoZIhvcNAQELBQADgYEAS5i9vLrlq824mUHlQx+wSTX+z4G/hB3p
-s4oruGnmNtKh98qUVncySYPDmIMRNLAAGiZm2VXTeGzgWl5nvROeQDIY2SfNoaLT
-VOjaTDWSeAIXwbyxAad6cvu0XHa3piI4Pg1vzA7PofGBsV/IpO4EoUv1CerRLCuU
-ex88tU+lHhw=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.cnf b/src/test/setup/radius-config/freeradius/certs_3/client.cnf
deleted file mode 100644
index b3cbdd2..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.cnf
+++ /dev/null
@@ -1,53 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/ca.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/ca.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= client
-default_bits		= 1024
-input_password		= whatever
-output_password		= whatever
-
-[client]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= user@ciena.com
-commonName		= user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.crt b/src/test/setup/radius-config/freeradius/certs_3/client.crt
deleted file mode 100644
index fd66778..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.crt
+++ /dev/null
@@ -1,58 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 2 (0x2)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Jun  6 21:12:27 2016 GMT
-            Not After : Jun  1 21:12:27 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (1024 bit)
-                Modulus:
-                    00:c2:f5:e2:4b:36:fd:2d:9e:9c:ee:e3:73:89:47:
-                    ca:be:81:ce:ef:0b:bf:ba:21:42:e5:85:29:5d:b9:
-                    95:1a:e1:99:8b:36:d5:ae:7c:b4:c6:74:7c:e4:37:
-                    de:fb:d4:78:76:26:a7:b1:f0:e1:22:1c:ce:52:5d:
-                    57:8c:dd:d8:0d:e4:92:f4:e7:85:e5:85:8d:34:4f:
-                    17:0e:19:73:d9:dd:eb:57:36:8d:ea:12:21:76:8b:
-                    41:91:48:e0:ad:47:b0:8d:38:39:38:54:77:d5:01:
-                    32:1b:7b:fc:c5:1d:c2:2e:08:84:f7:14:04:2e:36:
-                    5b:48:0d:3b:a4:3e:fd:ce:e5
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Client Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         c0:8e:4a:d8:ea:d0:c2:86:62:9b:be:bf:30:e8:3b:bf:b7:cb:
-         c7:8d:30:a8:08:8a:1c:1d:33:74:ab:35:8e:79:bd:58:b0:01:
-         97:f3:df:93:ad:62:2e:3b:57:45:c9:87:7e:67:42:82:3c:32:
-         81:e6:3f:f2:82:69:7d:35:af:80:92:54:98:01:52:48:8e:f9:
-         73:5c:a6:6b:39:a3:e6:85:9a:83:b9:f8:be:ad:75:ad:8b:fb:
-         ad:56:a6:38:54:c5:b6:f8:72:82:9d:7a:77:ee:a5:9e:b8:52:
-         c6:c9:1d:79:d7:d6:35:77:a1:7f:e5:7c:ea:9a:f6:f0:51:1b:
-         84:ba
------BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.csr b/src/test/setup/radius-config/freeradius/certs_3/client.csr
deleted file mode 100644
index 1b48939..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.csr
+++ /dev/null
@@ -1,12 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBuzCCASQCAQAwezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
-EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHTAbBgkqhkiG9w0BCQEW
-DnVzZXJAY2llbmEuY29tMRcwFQYDVQQDFA51c2VyQGNpZW5hLmNvbTCBnzANBgkq
-hkiG9w0BAQEFAAOBjQAwgYkCgYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUp
-XbmVGuGZizbVrny0xnR85Dfe+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8X
-Dhlz2d3rVzaN6hIhdotBkUjgrUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07
-pD79zuUCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4GBAAK1SEtJD81KQraGLJq2nzpw
-e8ORfVMaYvsh1P29K5bDedaZ9nEAAxZPDgXwT2jrGAgOSEzAgQ3FXviPbZqoSNNx
-0TK7+1E2oaiKJCx8quOVfmAW2+1Tt+hXBkjxuY35+iF8MQTS0HNw8SpC5gJwUr4h
-E24o/u65+RDAiElfrdrQ
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.key b/src/test/setup/radius-config/freeradius/certs_3/client.key
deleted file mode 100644
index fbacb78..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.key
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQDC9eJLNv0tnpzu43OJR8q+gc7vC7+6IULlhSlduZUa4ZmLNtWu
-fLTGdHzkN9771Hh2Jqex8OEiHM5SXVeM3dgN5JL054XlhY00TxcOGXPZ3etXNo3q
-EiF2i0GRSOCtR7CNODk4VHfVATIbe/zFHcIuCIT3FAQuNltIDTukPv3O5QIDAQAB
-AoGBAJha7NgYhevzqvIov25Fs1QDP0Kh7Ne5DH0u/e+nirUyHfqkBILSI7d+6uay
-Hsiv9t9mP+CXvGgbGMVW+oc0CpUbZw4Y64jZhg/vakMuHVhpgUCyPyzjk+7Z7STg
-2B1DEAxILApU8azjrDBIRHM8q0CH6NFwJPpFjg2oi7li6hPhAkEA56e/UT7Mh+57
-qWb2q9CuI+unQcav1tqxRxUtrGHl0YSO5YTWCnaT7vVFUSbemwUhEHJs8h+Qw41L
-g4eBu/qXLQJBANdy7puiDBBvV8XxQms14VRAEUUpCwqmzieG3RNmgr7wYRKyXzws
-hbgp5HIkGFIM4FOIrFj5jUP6CuF2BfoYaZkCQGRIny75w6s413nfY/u/TBOqyW5V
-J/wYElSWW35bpxTLkNzVY5+F88ankUlvTUDIuKaZEobCmXW+bilTeRs6gUUCQGeo
-2Lzw3rUZnTWTus0yg1Ox751C/hkF4LKL5NpsvAN6THpecAvXsA7HuS5hx4HSyCvo
-2mOEzj8ikxGfY4jNLiECQE09wQ39Gw3oGKCzdsTcWy8PXIWjOS44+7N/GjUB52+o
-CK7BGBOdZGZUSFc1rVA7eWKzxFDZ+EK264z6DL95mRw=
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.p12 b/src/test/setup/radius-config/freeradius/certs_3/client.p12
deleted file mode 100644
index 7d9e2b5..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_3/client.pem b/src/test/setup/radius-config/freeradius/certs_3/client.pem
deleted file mode 100644
index 284a484..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/client.pem
+++ /dev/null
@@ -1,39 +0,0 @@
-Bag Attributes
-    localKeyID: F6 7E 1B 99 53 80 BE 9D A3 BA 7A E8 F5 36 5C DB 4B F8 F1 AD 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: F6 7E 1B 99 53 80 BE 9D A3 BA 7A E8 F5 36 5C DB 4B F8 F1 AD 
-Key Attributes: <No Attributes>
------BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQDC9eJLNv0tnpzu43OJR8q+gc7vC7+6IULlhSlduZUa4ZmLNtWu
-fLTGdHzkN9771Hh2Jqex8OEiHM5SXVeM3dgN5JL054XlhY00TxcOGXPZ3etXNo3q
-EiF2i0GRSOCtR7CNODk4VHfVATIbe/zFHcIuCIT3FAQuNltIDTukPv3O5QIDAQAB
-AoGBAJha7NgYhevzqvIov25Fs1QDP0Kh7Ne5DH0u/e+nirUyHfqkBILSI7d+6uay
-Hsiv9t9mP+CXvGgbGMVW+oc0CpUbZw4Y64jZhg/vakMuHVhpgUCyPyzjk+7Z7STg
-2B1DEAxILApU8azjrDBIRHM8q0CH6NFwJPpFjg2oi7li6hPhAkEA56e/UT7Mh+57
-qWb2q9CuI+unQcav1tqxRxUtrGHl0YSO5YTWCnaT7vVFUSbemwUhEHJs8h+Qw41L
-g4eBu/qXLQJBANdy7puiDBBvV8XxQms14VRAEUUpCwqmzieG3RNmgr7wYRKyXzws
-hbgp5HIkGFIM4FOIrFj5jUP6CuF2BfoYaZkCQGRIny75w6s413nfY/u/TBOqyW5V
-J/wYElSWW35bpxTLkNzVY5+F88ankUlvTUDIuKaZEobCmXW+bilTeRs6gUUCQGeo
-2Lzw3rUZnTWTus0yg1Ox751C/hkF4LKL5NpsvAN6THpecAvXsA7HuS5hx4HSyCvo
-2mOEzj8ikxGfY4jNLiECQE09wQ39Gw3oGKCzdsTcWy8PXIWjOS44+7N/GjUB52+o
-CK7BGBOdZGZUSFc1rVA7eWKzxFDZ+EK264z6DL95mRw=
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_3/dh b/src/test/setup/radius-config/freeradius/certs_3/dh
deleted file mode 100644
index d7a3d42..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/dh
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN DH PARAMETERS-----
-MIGHAoGBAMpAOQ7EDU3k70JRJm9vtzlw09rpbH0CUZDppg0EeCc6iqVsP3FSy8hO
-fMQ/1Wp2froiJ3V8FyhlwkZPye4np0B5r95qUgEKUrt8kkmjD4WlS3QeXfTePAtS
-f3UcHa3MHsZ8ep1uv9WXuzGHA7c/dBHdr1m01j+Kky6wYH/1nnCrAgEC
------END DH PARAMETERS-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/index.txt b/src/test/setup/radius-config/freeradius/certs_3/index.txt
deleted file mode 100644
index 4dfe249..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/index.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-V	170601211227Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-V	170601211227Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr b/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr
deleted file mode 100644
index 8f7e63a..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr.old b/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr.old
deleted file mode 100644
index 8f7e63a..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/index.txt.attr.old
+++ /dev/null
@@ -1 +0,0 @@
-unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs_3/index.txt.old b/src/test/setup/radius-config/freeradius/certs_3/index.txt.old
deleted file mode 100644
index f4394b3..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/index.txt.old
+++ /dev/null
@@ -1 +0,0 @@
-V	170601211227Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_3/serial b/src/test/setup/radius-config/freeradius/certs_3/serial
deleted file mode 100644
index 75016ea..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/serial
+++ /dev/null
@@ -1 +0,0 @@
-03
diff --git a/src/test/setup/radius-config/freeradius/certs_3/serial.old b/src/test/setup/radius-config/freeradius/certs_3/serial.old
deleted file mode 100644
index 9e22bcb..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/serial.old
+++ /dev/null
@@ -1 +0,0 @@
-02
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.cnf b/src/test/setup/radius-config/freeradius/certs_3/server.cnf
deleted file mode 100644
index 444372d..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.cnf
+++ /dev/null
@@ -1,54 +0,0 @@
-[ ca ]
-default_ca		= CA_default
-
-[ CA_default ]
-dir			= ./
-certs			= $dir
-crl_dir			= $dir/crl
-database		= $dir/index.txt
-new_certs_dir		= $dir
-certificate		= $dir/server.pem
-serial			= $dir/serial
-crl			= $dir/crl.pem
-private_key		= $dir/server.key
-RANDFILE		= $dir/.rand
-name_opt		= ca_default
-cert_opt		= ca_default
-default_days		= 360
-default_crl_days	= 300
-default_md		= sha1
-preserve		= no
-policy			= policy_match
-
-[ policy_match ]
-countryName		= match
-stateOrProvinceName	= match
-organizationName	= match
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ policy_anything ]
-countryName		= optional
-stateOrProvinceName	= optional
-localityName		= optional
-organizationName	= optional
-organizationalUnitName	= optional
-commonName		= supplied
-emailAddress		= optional
-
-[ req ]
-prompt			= no
-distinguished_name	= server
-default_bits		= 2048
-input_password		= whatever
-output_password		= whatever
-
-[server]
-countryName		= US
-stateOrProvinceName	= CA
-localityName		= Somewhere
-organizationName	= Ciena Inc.
-emailAddress		= admin@ciena.com
-commonName		= "Example Server Certificate"
-
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.crt b/src/test/setup/radius-config/freeradius/certs_3/server.crt
deleted file mode 100644
index ea92e54..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.crt
+++ /dev/null
@@ -1,70 +0,0 @@
-Certificate:
-    Data:
-        Version: 3 (0x2)
-        Serial Number: 1 (0x1)
-    Signature Algorithm: sha1WithRSAEncryption
-        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
-        Validity
-            Not Before: Jun  6 21:12:27 2016 GMT
-            Not After : Jun  1 21:12:27 2017 GMT
-        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
-        Subject Public Key Info:
-            Public Key Algorithm: rsaEncryption
-                Public-Key: (2048 bit)
-                Modulus:
-                    00:9e:ca:94:59:9c:35:4c:84:93:99:02:ec:7c:a4:
-                    60:4c:b4:60:97:89:01:9a:0e:45:4d:c5:69:71:de:
-                    b9:e8:b8:78:ee:be:49:bc:30:4f:7e:2c:00:48:8c:
-                    ed:36:b7:48:0e:7e:67:6e:ac:7f:ba:21:78:91:fe:
-                    64:a7:30:6e:9c:41:d3:1f:89:f6:1f:33:7c:1f:c4:
-                    34:c0:89:ba:cf:71:f9:8b:4b:d2:ef:e9:7b:df:0b:
-                    5b:04:8e:40:fb:cf:a4:08:b5:e4:ab:40:16:a5:47:
-                    bc:90:c8:04:fc:d8:f2:05:0a:27:a7:c4:6c:c2:9a:
-                    a2:3c:f8:c6:fe:ff:d7:67:3c:aa:99:15:c2:52:b3:
-                    8f:ff:77:58:3c:06:66:03:24:fd:ab:e1:a3:cb:a9:
-                    6d:f9:e5:37:21:02:23:49:5f:61:c5:2b:fd:75:ac:
-                    d5:2c:27:9d:7c:24:46:2b:4c:6d:01:bd:a8:51:2a:
-                    9d:d7:03:53:30:c6:52:07:4e:62:5c:aa:d0:57:28:
-                    30:17:e6:c0:2a:8b:86:49:97:85:ba:fc:cb:d0:b0:
-                    67:9b:a0:ee:3a:14:32:7a:fd:6a:9b:bb:f9:75:9c:
-                    a5:c3:ab:a2:64:f0:2b:5c:24:cc:df:d1:6a:42:8c:
-                    ca:7c:5e:06:96:59:79:d8:18:26:5e:b2:e3:b3:6b:
-                    8f:df
-                Exponent: 65537 (0x10001)
-        X509v3 extensions:
-            X509v3 Extended Key Usage: 
-                TLS Web Server Authentication
-            X509v3 CRL Distribution Points: 
-
-                Full Name:
-                  URI:http://www.example.com/example_ca.crl
-
-    Signature Algorithm: sha1WithRSAEncryption
-         1d:65:7f:32:5b:2d:60:5d:17:ee:c5:e1:92:f2:cf:38:7b:f7:
-         cb:92:a2:5c:06:b2:bd:34:96:68:15:91:8c:85:92:f4:cc:af:
-         7a:b7:9c:10:2b:26:da:b6:5e:e4:66:01:8c:ad:9c:8f:bc:02:
-         9a:88:12:e2:2f:47:70:68:a5:b3:f1:df:6b:7f:82:d2:76:52:
-         fe:c0:2c:2c:cd:2d:26:2c:8a:52:f7:92:35:ce:50:5f:5b:26:
-         f0:bd:ef:ac:bc:fd:87:f7:87:37:d7:2b:56:9a:5a:14:b4:97:
-         b4:df:b4:95:c8:7b:76:49:a1:4b:5b:f7:10:4d:f1:b5:16:99:
-         f1:19
------BEGIN CERTIFICATE-----
-MIIDSTCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCeypRZnDVMhJOZAux8pGBMtGCXiQGaDkVNxWlx
-3rnouHjuvkm8ME9+LABIjO02t0gOfmdurH+6IXiR/mSnMG6cQdMfifYfM3wfxDTA
-ibrPcfmLS9Lv6XvfC1sEjkD7z6QIteSrQBalR7yQyAT82PIFCienxGzCmqI8+Mb+
-/9dnPKqZFcJSs4//d1g8BmYDJP2r4aPLqW355TchAiNJX2HFK/11rNUsJ518JEYr
-TG0BvahRKp3XA1MwxlIHTmJcqtBXKDAX5sAqi4ZJl4W6/MvQsGeboO46FDJ6/Wqb
-u/l1nKXDq6Jk8CtcJMzf0WpCjMp8XgaWWXnYGCZesuOza4/fAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAHWV/
-MlstYF0X7sXhkvLPOHv3y5KiXAayvTSWaBWRjIWS9MyverecECsm2rZe5GYBjK2c
-j7wCmogS4i9HcGils/Hfa3+C0nZS/sAsLM0tJiyKUveSNc5QX1sm8L3vrLz9h/eH
-N9crVppaFLSXtN+0lch7dkmhS1v3EE3xtRaZ8Rk=
------END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.csr b/src/test/setup/radius-config/freeradius/certs_3/server.csr
deleted file mode 100644
index 3b0b246..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.csr
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICzjCCAbYCAQAwgYgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UE
-BxMJU29tZXdoZXJlMRMwEQYDVQQKEwpDaWVuYSBJbmMuMR4wHAYJKoZIhvcNAQkB
-Fg9hZG1pbkBjaWVuYS5jb20xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRp
-ZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnsqUWZw1TIST
-mQLsfKRgTLRgl4kBmg5FTcVpcd656Lh47r5JvDBPfiwASIztNrdIDn5nbqx/uiF4
-kf5kpzBunEHTH4n2HzN8H8Q0wIm6z3H5i0vS7+l73wtbBI5A+8+kCLXkq0AWpUe8
-kMgE/NjyBQonp8RswpqiPPjG/v/XZzyqmRXCUrOP/3dYPAZmAyT9q+Gjy6lt+eU3
-IQIjSV9hxSv9dazVLCedfCRGK0xtAb2oUSqd1wNTMMZSB05iXKrQVygwF+bAKouG
-SZeFuvzL0LBnm6DuOhQyev1qm7v5dZylw6uiZPArXCTM39FqQozKfF4Glll52Bgm
-XrLjs2uP3wIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAGkM8qrENLFwbDbLRynH
-NlZtLcbRjSV7YfCIBRqakX/LRsKg/7tUesT9nlSHC0AQdPKpYi2qWaoHWYrcTiW6
-YNpL7N96tFQqo048XV0AZbimLeM/XIdvP7bAR2p5AGX7yGFLswcxgLvw1Gkkam16
-3u4yIWjVchl8MuxcdUo3wCLHgW181Z53BfpWQyhFOiwKtRvamZgfCgKYP87fMrlk
-zg0s0PIJEWdt3O0H+WMLvjt6pZNrfFt/J61cJmZ3ltbsJtkbMn+D/X6IzVXQPRIQ
-o1lE65CfzNrqwYfwU5e+VEixXiuMjS+W9+I/MCM79Xrg5bANAtqzVWs08AQ3/Krp
-sSY=
------END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.key b/src/test/setup/radius-config/freeradius/certs_3/server.key
deleted file mode 100644
index 42c203e..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAnsqUWZw1TISTmQLsfKRgTLRgl4kBmg5FTcVpcd656Lh47r5J
-vDBPfiwASIztNrdIDn5nbqx/uiF4kf5kpzBunEHTH4n2HzN8H8Q0wIm6z3H5i0vS
-7+l73wtbBI5A+8+kCLXkq0AWpUe8kMgE/NjyBQonp8RswpqiPPjG/v/XZzyqmRXC
-UrOP/3dYPAZmAyT9q+Gjy6lt+eU3IQIjSV9hxSv9dazVLCedfCRGK0xtAb2oUSqd
-1wNTMMZSB05iXKrQVygwF+bAKouGSZeFuvzL0LBnm6DuOhQyev1qm7v5dZylw6ui
-ZPArXCTM39FqQozKfF4Glll52BgmXrLjs2uP3wIDAQABAoIBAHdEIPjNYxr92P5s
-O8PCkrIKKSKoPRPA9Dzdsb1dVLV/GRX4xtkaUBqsYeocL2+RagW422c9GRJsYWsG
-cANDVVD3/+MIdFwrB2e0rwqOaEm3iX8ejGi2LdvxY4OgdR6tbr88H2ygzmGF6BO2
-qEhhiiZO4koYNrhT/Nde49hTwS5AK742M5UjLDoytbhWr7/UiPA5dXZPVgLYp/C/
-y71S8n8DkZd0KA0EFAnirVVeQ8+bhBHd/7V0UVeQgOcwt2bvTX/lM9vB+mfRNgpK
-de8XLFVzoFwIB2Zmun25XHCnh2m9u+UjYnOsScpqGW9lrMJU0CF47FHHws2heS1j
-ZWWCk0ECgYEAz/FAnb9FiC0NVPPh3/jisoLUCtQr+YmL/7woWBh9XTX0u89L9egS
-uCsCYKRQU68Av/gZH0OGe7nUOnS9XS3ox21YuWi56A1Dt0fE3sY/fakHscB+DI4y
-RpWe3Sq7fNw2dD6+zVwkyxzijx72rTFsuE5fbi4Wu+GfVxqWcpF6k9UCgYEAw31X
-0S5ATHwrxWXzeFFh6PI0TYlrAEOrv2VPTnjTdO0hSRTlGQIyw2CTJsPLKhnzGcoE
-YWR+bc65YYyRHFvdg1z2AzkxTjlDKPDDDIHsN/8ioGOkgdHrfHPj+IYomCK2gTiK
-b9ACWa4eJie1WuCa1jFxOwesME8R4h6PF8I/kuMCgYEAlGs03KMeBPFrF5yvNsaV
-QCbDJCuGa6iyRUlOXnq0WChf0wFFzXHkkpEYNFRzhRde6KYzgC4ZOgxMH6EdW/md
-sWIptsS6FJgLIjFWok9MF+viswuarGl+FwVKC8ApbBhdwaDbXQaMuHCYgODv3nRt
-jitaT/EThuymZ2xxv7p4jDUCgYAcBK3+dCOREsbvPuJ7XyaqTXSDnGtymUBXaTzJ
-oXWlL4tyvwTq5a7C4+eTScKnoRp/mr2v+8hHErbtLUlrCBuihUNZSOwxKvHeP0ul
-UkW57N5YoI9mb+DRcIa/CvNsgqUntiPZTLXZhqQZkd1jTgfDJUj2Te426k5E215k
-6FlWQwKBgHzyo9wJlVVa7H6rMrbC4DciHzlHd1b5Ua+KMhUcQevkI28yr/S4P0kQ
-xzv3TqyiH1Zs9YT3+cXbx4fQbJMCIT1M0m2a5wqctS4fx+dvCldMtZBvTlvg5dK4
-+xq1HPyYnldr4Q/XBIL12dCVH2EC5p3lXiUhOzyTVNvh1CM0SPIV
------END RSA PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.p12 b/src/test/setup/radius-config/freeradius/certs_3/server.p12
deleted file mode 100644
index da9f861..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.p12
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_3/server.pem b/src/test/setup/radius-config/freeradius/certs_3/server.pem
deleted file mode 100644
index cb5396b..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/server.pem
+++ /dev/null
@@ -1,54 +0,0 @@
-Bag Attributes
-    localKeyID: 42 D6 81 81 00 AD 68 D4 EF 8C 30 42 2E 38 B1 D1 73 E5 42 58 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIIDSTCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
-YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCeypRZnDVMhJOZAux8pGBMtGCXiQGaDkVNxWlx
-3rnouHjuvkm8ME9+LABIjO02t0gOfmdurH+6IXiR/mSnMG6cQdMfifYfM3wfxDTA
-ibrPcfmLS9Lv6XvfC1sEjkD7z6QIteSrQBalR7yQyAT82PIFCienxGzCmqI8+Mb+
-/9dnPKqZFcJSs4//d1g8BmYDJP2r4aPLqW355TchAiNJX2HFK/11rNUsJ518JEYr
-TG0BvahRKp3XA1MwxlIHTmJcqtBXKDAX5sAqi4ZJl4W6/MvQsGeboO46FDJ6/Wqb
-u/l1nKXDq6Jk8CtcJMzf0WpCjMp8XgaWWXnYGCZesuOza4/fAgMBAAGjTzBNMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
-ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAHWV/
-MlstYF0X7sXhkvLPOHv3y5KiXAayvTSWaBWRjIWS9MyverecECsm2rZe5GYBjK2c
-j7wCmogS4i9HcGils/Hfa3+C0nZS/sAsLM0tJiyKUveSNc5QX1sm8L3vrLz9h/eH
-N9crVppaFLSXtN+0lch7dkmhS1v3EE3xtRaZ8Rk=
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: 42 D6 81 81 00 AD 68 D4 EF 8C 30 42 2E 38 B1 D1 73 E5 42 58 
-Key Attributes: <No Attributes>
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAnsqUWZw1TISTmQLsfKRgTLRgl4kBmg5FTcVpcd656Lh47r5J
-vDBPfiwASIztNrdIDn5nbqx/uiF4kf5kpzBunEHTH4n2HzN8H8Q0wIm6z3H5i0vS
-7+l73wtbBI5A+8+kCLXkq0AWpUe8kMgE/NjyBQonp8RswpqiPPjG/v/XZzyqmRXC
-UrOP/3dYPAZmAyT9q+Gjy6lt+eU3IQIjSV9hxSv9dazVLCedfCRGK0xtAb2oUSqd
-1wNTMMZSB05iXKrQVygwF+bAKouGSZeFuvzL0LBnm6DuOhQyev1qm7v5dZylw6ui
-ZPArXCTM39FqQozKfF4Glll52BgmXrLjs2uP3wIDAQABAoIBAHdEIPjNYxr92P5s
-O8PCkrIKKSKoPRPA9Dzdsb1dVLV/GRX4xtkaUBqsYeocL2+RagW422c9GRJsYWsG
-cANDVVD3/+MIdFwrB2e0rwqOaEm3iX8ejGi2LdvxY4OgdR6tbr88H2ygzmGF6BO2
-qEhhiiZO4koYNrhT/Nde49hTwS5AK742M5UjLDoytbhWr7/UiPA5dXZPVgLYp/C/
-y71S8n8DkZd0KA0EFAnirVVeQ8+bhBHd/7V0UVeQgOcwt2bvTX/lM9vB+mfRNgpK
-de8XLFVzoFwIB2Zmun25XHCnh2m9u+UjYnOsScpqGW9lrMJU0CF47FHHws2heS1j
-ZWWCk0ECgYEAz/FAnb9FiC0NVPPh3/jisoLUCtQr+YmL/7woWBh9XTX0u89L9egS
-uCsCYKRQU68Av/gZH0OGe7nUOnS9XS3ox21YuWi56A1Dt0fE3sY/fakHscB+DI4y
-RpWe3Sq7fNw2dD6+zVwkyxzijx72rTFsuE5fbi4Wu+GfVxqWcpF6k9UCgYEAw31X
-0S5ATHwrxWXzeFFh6PI0TYlrAEOrv2VPTnjTdO0hSRTlGQIyw2CTJsPLKhnzGcoE
-YWR+bc65YYyRHFvdg1z2AzkxTjlDKPDDDIHsN/8ioGOkgdHrfHPj+IYomCK2gTiK
-b9ACWa4eJie1WuCa1jFxOwesME8R4h6PF8I/kuMCgYEAlGs03KMeBPFrF5yvNsaV
-QCbDJCuGa6iyRUlOXnq0WChf0wFFzXHkkpEYNFRzhRde6KYzgC4ZOgxMH6EdW/md
-sWIptsS6FJgLIjFWok9MF+viswuarGl+FwVKC8ApbBhdwaDbXQaMuHCYgODv3nRt
-jitaT/EThuymZ2xxv7p4jDUCgYAcBK3+dCOREsbvPuJ7XyaqTXSDnGtymUBXaTzJ
-oXWlL4tyvwTq5a7C4+eTScKnoRp/mr2v+8hHErbtLUlrCBuihUNZSOwxKvHeP0ul
-UkW57N5YoI9mb+DRcIa/CvNsgqUntiPZTLXZhqQZkd1jTgfDJUj2Te426k5E215k
-6FlWQwKBgHzyo9wJlVVa7H6rMrbC4DciHzlHd1b5Ua+KMhUcQevkI28yr/S4P0kQ
-xzv3TqyiH1Zs9YT3+cXbx4fQbJMCIT1M0m2a5wqctS4fx+dvCldMtZBvTlvg5dK4
-+xq1HPyYnldr4Q/XBIL12dCVH2EC5p3lXiUhOzyTVNvh1CM0SPIV
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_3/user@ciena.com.pem b/src/test/setup/radius-config/freeradius/certs_3/user@ciena.com.pem
deleted file mode 100644
index 87c934b..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/user@ciena.com.pem
+++ /dev/null
@@ -1,41 +0,0 @@
-Bag Attributes
-    localKeyID: F6 7E 1B 99 53 80 BE 9D A3 BA 7A E8 F5 36 5C DB 4B F8 F1 AD 
-subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
-issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
------BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----
-Bag Attributes
-    localKeyID: F6 7E 1B 99 53 80 BE 9D A3 BA 7A E8 F5 36 5C DB 4B F8 F1 AD 
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIICxjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIi6kHFx9NOKQCAggA
-MBQGCCqGSIb3DQMHBAhfAxfmWmuO8ASCAoDmot3MgUJcnLSE5Xmj1AWjCEnNgo1p
-/QgG5Pc1HfWvBTMlE4gqYkJuJlGm5IymJcb8ICZP+v8NLq5QNqquZZFohVh27Ow6
-1kVftnK7Jb9WQwlzSMqNkseCG33w8546KVsZtR3MEBo3eLu3WvNOFOJXnHvx3Llq
-4J4jrADcj6Ux3774IAowFadt4h6L3vx4UrjDUScwN+v5Oxr8p7nvajvC6JjIzkdL
-wm30RYSPoj7F/tO4773A11gOxFsPji4CcmTVFVIG/PcoQMOu36IO3JTDjBLVD39Z
-xtGMFFC3tbSdzPf4QOmpx45bk3l9HLqHFJgfuS2kLXx7zvSvhe5kaAzcgI6/fmps
-EyHVaa5Q/M986/q5BAJUGqE8L3bnYm5KASzvL45+bj+juyA5olKVlVs3n6gbjcd5
-kXonq1cBVNWEL/+mj2hfmzY4JVdzE14dihV5cu298KWn/8AfoTUQCZOg+ZpIXXCe
-6v1wyS9AvFH+RQ9zbPNdrB8zrDubqb8jajr/uE+PgG5ocFfRCaqlutrFg7Yoa/+x
-wtyl24y1F11kKhukLJqaCQZ3EYcSa2SIbor/hUn4zp6CwoE4UM0pjsddRB5e3sdf
-QhmBSKOUKmDjB1FVF8sTzp+dNhDX+ukL8PB7NlnAzPs2afQ8mHwWvXKjwoKPi2/f
-tRYeA8WpytF2GJnY+hS1mjunsDApmVMthJ9omT5NkqA2MAYLTzPrHaEJgnscH3Vj
-tOg315IS6vAflbg4lyuIs8xgNYfVJTbGL905Od3hhRmOZyK8D62E9cJL5mNh/n9J
-lNsYYp2mltrSCezDQ9nseeuJ6laa080vFE514TTGNw8jtyamj84/9XGz
------END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_3/xpextensions b/src/test/setup/radius-config/freeradius/certs_3/xpextensions
deleted file mode 100644
index 8e4a9a2..0000000
--- a/src/test/setup/radius-config/freeradius/certs_3/xpextensions
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#  File containing the OIDs required for Windows.
-#
-#  http://support.microsoft.com/kb/814394/en-us
-#
-[ xpclient_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.2
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-[ xpserver_ext]
-extendedKeyUsage = 1.3.6.1.5.5.7.3.1
-crlDistributionPoints = URI:http://www.example.com/example_ca.crl
-
-#
-#  Add this to the PKCS#7 keybag attributes holding the client's private key
-#  for machine authentication.
-#
-#  the presence of this OID tells Windows XP that the cert is intended
-#  for use by the computer itself, and not by an end-user.
-#
-#  The other solution is to use Microsoft's web certificate server
-#  to generate these certs.
-#
-# 1.3.6.1.4.1.311.17.2
diff --git a/src/test/setup/radius-config/freeradius/clients.conf b/src/test/setup/radius-config/freeradius/clients.conf
deleted file mode 100644
index c03f512..0000000
--- a/src/test/setup/radius-config/freeradius/clients.conf
+++ /dev/null
@@ -1,1661 +0,0 @@
-# -*- text -*-
-##
-## clients.conf -- client configuration directives
-##
-##	$Id: 81f450102d9f1a3bc72264ab8d06543591fcab98 $
-
-#######################################################################
-#
-#  Define RADIUS clients (usually a NAS, Access Point, etc.).
-
-#
-#  Defines a RADIUS client.
-#
-#  '127.0.0.1' is another name for 'localhost'.  It is enabled by default,
-#  to allow testing of the server after an initial installation.  If you
-#  are not going to be permitting RADIUS queries from localhost, we suggest
-#  that you delete, or comment out, this entry.
-#
-#
-
-#
-#  Each client has a "short name" that is used to distinguish it from
-#  other clients.
-#
-#  In version 1.x, the string after the word "client" was the IP
-#  address of the client.  In 2.0, the IP address is configured via
-#  the "ipaddr" or "ipv6addr" fields.  For compatibility, the 1.x
-#  format is still accepted.
-#
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password 
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client 0.0.0.0/0{
-	secret = radius_password
-}
-client ipv6{
-	ipv6addr = ::
-	secret = radius_password
-}
-client localhost {
-	#  Allowed values are:
-	#	dotted quad (1.2.3.4)
-	#	   hostname	(radius.example.com)
-	ipaddr = 127.0.0.1
-
-	#  OR, you can use an IPv6 address, but not both
-	#  at the same time.
-#	ipv6addr = ::	# any.  ::1 == localhost
-
-	#
-	#  The transport protocol.
-	#
-	#  If unspecified, defaults to "udp", which is the traditional
-	#  RADIUS transport.  It may also be "tcp", in which case the
-	#  server will accept connections from this client ONLY over TCP.
-	#
-	proto = *
-
-	#
-	#  A note on DNS:  We STRONGLY recommend using IP addresses
-	#  rather than host names.  Using host names means that the
-	#  server will do DNS lookups when it starts, making it
-	#  dependent on DNS.  i.e. If anything goes wrong with DNS,
-	#  the server won't start!
-	#
-	#  The server also looks up the IP address from DNS once, and
-	#  only once, when it starts.  If the DNS record is later
-	#  updated, the server WILL NOT see that update.
-	#
-
-	#  One client definition can be applied to an entire network.
-	#  e.g. 127/8 should be defined with "ipaddr = 127.0.0.0" and
-	#  "netmask = 8"
-	#
-	#  If not specified, the default netmask is 32 (i.e. /32)
-	#
-	#  We do NOT recommend using anything other than 32.  There
-	#  are usually other, better ways to achieve the same goal.
-	#  Using netmasks of other than 32 can cause security issues.
-	#
-	#  You can specify overlapping networks (127/8 and 127.0/16)
-	#  In that case, the smallest possible network will be used
-	#  as the "best match" for the client.
-	#
-	#  Clients can also be defined dynamically at run time, based
-	#  on any criteria.  e.g. SQL lookups, keying off of NAS-Identifier,
-	#  etc.
-	#  See raddb/sites-available/dynamic-clients for details.
-	#
-
-#	netmask = 32
-
-	#
-	#  The shared secret use to "encrypt" and "sign" packets between
-	#  the NAS and FreeRADIUS.  You MUST change this secret from the
-	#  default, otherwise it's not a secret any more!
-	#
-	#  The secret can be any string, up to 8k characters in length.
-	#
-	#  Control codes can be entered vi octal encoding,
-	#	e.g. "\101\102" == "AB"
-	#  Quotation marks can be entered by escaping them,
-	#	e.g. "foo\"bar"
-	#
-	#  A note on security:  The security of the RADIUS protocol
-	#  depends COMPLETELY on this secret!  We recommend using a
-	#  shared secret that is composed of:
-	#
-	#	upper case letters
-	#	lower case letters
-	#	numbers
-	#
-	#  And is at LEAST 8 characters long, preferably 16 characters in
-	#  length.  The secret MUST be random, and should not be words,
-	#  phrase, or anything else that is recognisable.
-	#
-	#  The default secret below is only for testing, and should
-	#  not be used in any real environment.
-	#
-	secret = radius_password radius_password 
-
-	#
-	#  Old-style clients do not send a Message-Authenticator
-	#  in an Access-Request.  RFC 5080 suggests that all clients
-	#  SHOULD include it in an Access-Request.  The configuration
-	#  item below allows the server to require it.  If a client
-	#  is required to include a Message-Authenticator and it does
-	#  not, then the packet will be silently discarded.
-	#
-	#  allowed values: yes, no
-	require_message_authenticator = no
-
-	#
-	#  The short name is used as an alias for the fully qualified
-	#  domain name, or the IP address.
-	#
-	#  It is accepted for compatibility with 1.x, but it is no
-	#  longer necessary in 2.0
-	#
-#	shortname = localhost
-
-	#
-	# the following three fields are optional, but may be used by
-	# checkrad.pl for simultaneous use checks
-	#
-
-	#
-	# The nas_type tells 'checkrad.pl' which NAS-specific method to
-	#  use to query the NAS for simultaneous use.
-	#
-	#  Permitted NAS types are:
-	#
-	#	cisco
-	#	computone
-	#	livingston
-	#	juniper
-	#	max40xx
-	#	multitech
-	#	netserver
-	#	pathras
-	#	patton
-	#	portslave
-	#	tc
-	#	usrhiper
-	#	other		# for all other types
-
-	#
-	nas_type	 = other	# localhost isn't usually a NAS...
-
-	#
-	#  The following two configurations are for future use.
-	#  The 'naspasswd' file is currently used to store the NAS
-	#  login name and password, which is used by checkrad.pl
-	#  when querying the NAS for simultaneous use.
-	#
-#	login	   = !root
-#	password	= someadminpas
-
-	#
-	#  As of 2.0, clients can also be tied to a virtual server.
-	#  This is done by setting the "virtual_server" configuration
-	#  item, as in the example below.
-	#
-#	virtual_server = home1
-
-	#
-	#  A pointer to the "home_server_pool" OR a "home_server"
-	#  section that contains the CoA configuration for this
-	#  client.  For an example of a coa home server or pool,
-	#  see raddb/sites-available/originate-coa
-#	coa_server = coa
-
-	#
-	#  Connection limiting for clients using "proto = tcp".
-	#
-	#  This section is ignored for clients sending UDP traffic
-	#
-	limit {
-		#
-		#  Limit the number of simultaneous TCP connections from a client
-		#
-		#  The default is 16.
-		#  Setting this to 0 means "no limit"
-		max_connections = 16
-
-		#  The per-socket "max_requests" option does not exist.
-
-		#
-		#  The lifetime, in seconds, of a TCP connection.  After
-		#  this lifetime, the connection will be closed.
-		#
-		#  Setting this to 0 means "forever".
-		lifetime = 0
-
-		#
-		#  The idle timeout, in seconds, of a TCP connection.
-		#  If no packets have been received over the connection for
-		#  this time, the connection will be closed.
-		#
-		#  Setting this to 0 means "no timeout".
-		#
-		#  We STRONGLY RECOMMEND that you set an idle timeout.
-		#
-		idle_timeout = 30
-	}
-}
-
-# IPv6 Client
-#client ::1 {
-#	secret		= 
-#	shortname	= localhost
-#}
-#
-# All IPv6 Site-local clients
-#client fe80::/16 {
-#	secret		= 
-#	shortname	= localhost
-#}
-
-#client some.host.org {
-#	secret		= 
-#	shortname	= localhost
-#}
-
-#
-#  You can now specify one secret for a network of clients.
-#  When a client request comes in, the BEST match is chosen.
-#  i.e. The entry from the smallest possible network.
-#
-#client 192.0.2.0/24 {
-#	secret		= -1
-#	shortname	= private-network-1
-#}
-#
-#client 198.51.100.0/24 {
-#	secret		= -2
-#	shortname	= private-network-2
-#}
-
-
-#client 203.0.113.1 {
-#	# secret and password are mapped through the "secrets" file.
-#	secret		= 
-#	shortname	= liv1
-#}
-
-client 172.17.0.0/16 {
-	# secret and password are mapped through the "secrets" file.
-	secret		= radius_password
-	shortname	= auth-test
-}
-# The following three fields are optional, but may be used by
-# checkrad.pl for simultaneous usage checks
-
-#	nas_type	= livingston
-#	login		= !root
-#	password	= someadminpas
-#}
-
-#######################################################################
-#
-#  Per-socket client lists.  The configuration entries are exactly
-#  the same as above, but they are nested inside of a section.
-#
-#  You can have as many per-socket client lists as you have "listen"
-#  sections, or you can re-use a list among multiple "listen" sections.
-#
-#  Un-comment this section, and edit a "listen" section to add:
-#  "clients = per_socket_clients".  That IP address/port combination
-#  will then accept ONLY the clients listed in this section.
-#
-#clients per_socket_clients {
-#	client 192.0.2.4 {
-#		secret = radius_password radius_password 
-#	}
-#}
diff --git a/src/test/setup/radius-config/freeradius/clients.conf.orig b/src/test/setup/radius-config/freeradius/clients.conf.orig
deleted file mode 100644
index f9ad206..0000000
--- a/src/test/setup/radius-config/freeradius/clients.conf.orig
+++ /dev/null
@@ -1,314 +0,0 @@
-# -*- text -*-
-##
-## clients.conf -- client configuration directives
-##
-##	$Id: 81f450102d9f1a3bc72264ab8d06543591fcab98 $
-
-#######################################################################
-#
-#  Define RADIUS clients (usually a NAS, Access Point, etc.).
-
-#
-#  Defines a RADIUS client.
-#
-#  '127.0.0.1' is another name for 'localhost'.  It is enabled by default,
-#  to allow testing of the server after an initial installation.  If you
-#  are not going to be permitting RADIUS queries from localhost, we suggest
-#  that you delete, or comment out, this entry.
-#
-#
-
-#
-#  Each client has a "short name" that is used to distinguish it from
-#  other clients.
-#
-#  In version 1.x, the string after the word "client" was the IP
-#  address of the client.  In 2.0, the IP address is configured via
-#  the "ipaddr" or "ipv6addr" fields.  For compatibility, the 1.x
-#  format is still accepted.
-#
-client 0.0.0.0/0{
-	secret = testing123
-}
-client ipv6{
-	ipv6addr = ::
-	secret = 
-}
-client 0.0.0.0/0{
-	secret = 
-}
-client ipv6{
-	ipv6addr = ::
-	secret = 
-}
-client 0.0.0.0/0{
-	secret = testing123
-}
-client ipv6{
-	ipv6addr = ::
-	secret = testing123
-}
-client 0.0.0.0/0{
-	secret = testing123
-}
-client ipv6{
-	ipv6addr = ::
-	secret = testing123
-}
-client localhost {
-	#  Allowed values are:
-	#	dotted quad (1.2.3.4)
-	#	   hostname	(radius.example.com)
-	ipaddr = 127.0.0.1
-
-	#  OR, you can use an IPv6 address, but not both
-	#  at the same time.
-#	ipv6addr = ::	# any.  ::1 == localhost
-
-	#
-	#  The transport protocol.
-	#
-	#  If unspecified, defaults to "udp", which is the traditional
-	#  RADIUS transport.  It may also be "tcp", in which case the
-	#  server will accept connections from this client ONLY over TCP.
-	#
-	proto = *
-
-	#
-	#  A note on DNS:  We STRONGLY recommend using IP addresses
-	#  rather than host names.  Using host names means that the
-	#  server will do DNS lookups when it starts, making it
-	#  dependent on DNS.  i.e. If anything goes wrong with DNS,
-	#  the server won't start!
-	#
-	#  The server also looks up the IP address from DNS once, and
-	#  only once, when it starts.  If the DNS record is later
-	#  updated, the server WILL NOT see that update.
-	#
-
-	#  One client definition can be applied to an entire network.
-	#  e.g. 127/8 should be defined with "ipaddr = 127.0.0.0" and
-	#  "netmask = 8"
-	#
-	#  If not specified, the default netmask is 32 (i.e. /32)
-	#
-	#  We do NOT recommend using anything other than 32.  There
-	#  are usually other, better ways to achieve the same goal.
-	#  Using netmasks of other than 32 can cause security issues.
-	#
-	#  You can specify overlapping networks (127/8 and 127.0/16)
-	#  In that case, the smallest possible network will be used
-	#  as the "best match" for the client.
-	#
-	#  Clients can also be defined dynamically at run time, based
-	#  on any criteria.  e.g. SQL lookups, keying off of NAS-Identifier,
-	#  etc.
-	#  See raddb/sites-available/dynamic-clients for details.
-	#
-
-#	netmask = 32
-
-	#
-	#  The shared secret use to "encrypt" and "sign" packets between
-	#  the NAS and FreeRADIUS.  You MUST change this secret from the
-	#  default, otherwise it's not a secret any more!
-	#
-	#  The secret can be any string, up to 8k characters in length.
-	#
-	#  Control codes can be entered vi octal encoding,
-	#	e.g. "\101\102" == "AB"
-	#  Quotation marks can be entered by escaping them,
-	#	e.g. "foo\"bar"
-	#
-	#  A note on security:  The security of the RADIUS protocol
-	#  depends COMPLETELY on this secret!  We recommend using a
-	#  shared secret that is composed of:
-	#
-	#	upper case letters
-	#	lower case letters
-	#	numbers
-	#
-	#  And is at LEAST 8 characters long, preferably 16 characters in
-	#  length.  The secret MUST be random, and should not be words,
-	#  phrase, or anything else that is recognisable.
-	#
-	#  The default secret below is only for testing, and should
-	#  not be used in any real environment.
-	#
-	secret = 
-
-	#
-	#  Old-style clients do not send a Message-Authenticator
-	#  in an Access-Request.  RFC 5080 suggests that all clients
-	#  SHOULD include it in an Access-Request.  The configuration
-	#  item below allows the server to require it.  If a client
-	#  is required to include a Message-Authenticator and it does
-	#  not, then the packet will be silently discarded.
-	#
-	#  allowed values: yes, no
-	require_message_authenticator = no
-
-	#
-	#  The short name is used as an alias for the fully qualified
-	#  domain name, or the IP address.
-	#
-	#  It is accepted for compatibility with 1.x, but it is no
-	#  longer necessary in 2.0
-	#
-#	shortname = localhost
-
-	#
-	# the following three fields are optional, but may be used by
-	# checkrad.pl for simultaneous use checks
-	#
-
-	#
-	# The nas_type tells 'checkrad.pl' which NAS-specific method to
-	#  use to query the NAS for simultaneous use.
-	#
-	#  Permitted NAS types are:
-	#
-	#	cisco
-	#	computone
-	#	livingston
-	#	juniper
-	#	max40xx
-	#	multitech
-	#	netserver
-	#	pathras
-	#	patton
-	#	portslave
-	#	tc
-	#	usrhiper
-	#	other		# for all other types
-
-	#
-	nas_type	 = other	# localhost isn't usually a NAS...
-
-	#
-	#  The following two configurations are for future use.
-	#  The 'naspasswd' file is currently used to store the NAS
-	#  login name and password, which is used by checkrad.pl
-	#  when querying the NAS for simultaneous use.
-	#
-#	login	   = !root
-#	password	= someadminpas
-
-	#
-	#  As of 2.0, clients can also be tied to a virtual server.
-	#  This is done by setting the "virtual_server" configuration
-	#  item, as in the example below.
-	#
-#	virtual_server = home1
-
-	#
-	#  A pointer to the "home_server_pool" OR a "home_server"
-	#  section that contains the CoA configuration for this
-	#  client.  For an example of a coa home server or pool,
-	#  see raddb/sites-available/originate-coa
-#	coa_server = coa
-
-	#
-	#  Connection limiting for clients using "proto = tcp".
-	#
-	#  This section is ignored for clients sending UDP traffic
-	#
-	limit {
-		#
-		#  Limit the number of simultaneous TCP connections from a client
-		#
-		#  The default is 16.
-		#  Setting this to 0 means "no limit"
-		max_connections = 16
-
-		#  The per-socket "max_requests" option does not exist.
-
-		#
-		#  The lifetime, in seconds, of a TCP connection.  After
-		#  this lifetime, the connection will be closed.
-		#
-		#  Setting this to 0 means "forever".
-		lifetime = 0
-
-		#
-		#  The idle timeout, in seconds, of a TCP connection.
-		#  If no packets have been received over the connection for
-		#  this time, the connection will be closed.
-		#
-		#  Setting this to 0 means "no timeout".
-		#
-		#  We STRONGLY RECOMMEND that you set an idle timeout.
-		#
-		idle_timeout = 30
-	}
-}
-
-# IPv6 Client
-#client ::1 {
-#	secret		= 
-#	shortname	= localhost
-#}
-#
-# All IPv6 Site-local clients
-#client fe80::/16 {
-#	secret		= 
-#	shortname	= localhost
-#}
-
-#client some.host.org {
-#	secret		= 
-#	shortname	= localhost
-#}
-
-#
-#  You can now specify one secret for a network of clients.
-#  When a client request comes in, the BEST match is chosen.
-#  i.e. The entry from the smallest possible network.
-#
-#client 192.0.2.0/24 {
-#	secret		= -1
-#	shortname	= private-network-1
-#}
-#
-#client 198.51.100.0/24 {
-#	secret		= -2
-#	shortname	= private-network-2
-#}
-
-
-#client 203.0.113.1 {
-#	# secret and password are mapped through the "secrets" file.
-#	secret		= 
-#	shortname	= liv1
-#}
-
-client 172.17.0.0/16 {
-	# secret and password are mapped through the "secrets" file.
-	secret		= testing123 
-	shortname	= auth-test
-}
-# The following three fields are optional, but may be used by
-# checkrad.pl for simultaneous usage checks
-
-#	nas_type	= livingston
-#	login		= !root
-#	password	= someadminpas
-#}
-
-#######################################################################
-#
-#  Per-socket client lists.  The configuration entries are exactly
-#  the same as above, but they are nested inside of a section.
-#
-#  You can have as many per-socket client lists as you have "listen"
-#  sections, or you can re-use a list among multiple "listen" sections.
-#
-#  Un-comment this section, and edit a "listen" section to add:
-#  "clients = per_socket_clients".  That IP address/port combination
-#  will then accept ONLY the clients listed in this section.
-#
-#clients per_socket_clients {
-#	client 192.0.2.4 {
-#		secret = 
-#	}
-#}
diff --git a/src/test/setup/radius-config/freeradius/dictionary b/src/test/setup/radius-config/freeradius/dictionary
deleted file mode 100644
index 1f7dc90..0000000
--- a/src/test/setup/radius-config/freeradius/dictionary
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#	This is the local dictionary file which can be
-#	edited by local administrators.  It will be loaded
-#	AFTER the main dictionary files are loaded.
-#
-#	As of version 3.0.2, FreeRADIUS will automatically
-#	load the main dictionary files from
-#
-#		${prefix}/share/freeradius/dictionary
-#
-#	It is no longer necessary for this file to $INCLUDE
-#	the main dictionaries.  However, if the $INCLUDE
-#	line is here, nothing bad will happen.
-#
-#	Any new/changed attributes MUST be placed in this file.
-#	The pre-defined dictionaries SHOULD NOT be edited.
-#
-#	See "man dictionary" for documentation on its format.
-#
-#	$Id: eed5d70f41b314f9ed3f006a22d9f9a2be2c9516 $
-#
-
-#
-#	All local attributes and $INCLUDE's should go into
-#	this file.
-#
-
-#	If you want to add entries to the dictionary file,
-#	which are NOT going to be placed in a RADIUS packet,
-#	add them to the 'dictionary.local' file.
-#
-#	The numbers you pick should be between 3000 and 4000.
-#	These attributes will NOT go into a RADIUS packet.
-#
-#	If you want that, you will need to use VSAs.  This means
-#	requesting allocation of a Private Enterprise Code from
-#	http://iana.org.  We STRONGLY suggest doing that only if
-#	you are a vendor of RADIUS equipment.
-#
-#	See RFC 6158 for more details.
-#	http://ietf.org/rfc/rfc6158.txt
-#
-
-#
-#	These attributes are examples
-#
-#ATTRIBUTE	My-Local-String		3000	string
-#ATTRIBUTE	My-Local-IPAddr		3001	ipaddr
-#ATTRIBUTE	My-Local-Integer	3002	integer
diff --git a/src/test/setup/radius-config/freeradius/experimental.conf b/src/test/setup/radius-config/freeradius/experimental.conf
deleted file mode 100644
index e5395f3..0000000
--- a/src/test/setup/radius-config/freeradius/experimental.conf
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-#  This file contains the configuration for experimental modules.
-#
-#  By default, it is NOT included in the build.
-#
-#  $Id: 87d9744a4f0fa7b9b06b4908ddd6b7d2f1a7fd62 $
-#
-
-# Configuration for the Python module.
-#
-# Where radiusd is a Python module, radiusd.py, and the
-# function 'authorize' is called.  Here is a dummy piece
-# of code:
-#
-#	def authorize(params):
-#		print params
-#		return (5, ('Reply-Message', 'banned'))
-#
-# The RADIUS value-pairs are passed as a tuple of tuple
-# pairs as the first argument, e.g. (('attribute1',
-# 'value1'), ('attribute2', 'value2'))
-#
-# The function return is a tuple with the first element
-# being the return value of the function.
-# The 5 corresponds to RLM_MODULE_USERLOCK. I plan to
-# write the return values as Python symbols to avoid
-# confusion.
-#
-# The remaining tuple members are the string form of
-# value-pairs which are passed on to pairmake().
-#
-python {
-	mod_instantiate = radiusd_test
-	func_instantiate = instantiate
-
-	mod_authorize = radiusd_test
-	func_authorize = authorize
-
-	mod_accounting = radiusd_test
-	func_accounting = accounting
-
-	mod_pre_proxy = radiusd_test
-	func_pre_proxy = pre_proxy
-
-	mod_post_proxy = radiusd_test
-	func_post_proxy = post_proxy
-
-	mod_post_auth = radiusd_test
-	func_post_auth = post_auth
-
-	mod_recv_coa = radiusd_test
-	func_recv_coa = recv_coa
-
-	mod_send_coa = radiusd_test
-	func_send_coa = send_coa
-
-	mod_detach = radiusd_test
-	func_detach = detach
-}
-
-
-# Configuration for the example module.  Uncommenting it will cause it
-# to get loaded and initialised, but should have no real effect as long
-# it is not referenced in one of the autz/auth/preacct/acct sections
-example {
-	#  Boolean variable.
-	# allowed values: {no, yes}
-	boolean = yes
-
-	#  An integer, of any value.
-	integer = 16
-
-	#  A string.
-	string = "This is an example configuration string"
-
-	# An IP address, either in dotted quad (1.2.3.4) or hostname
-	# (example.com)
-	ipaddr = 127.0.0.1
-
-	# A subsection
-	mysubsection {
-		anotherinteger = 1000
-		# They nest
-		deeply nested {
-			string = "This is a different string"
-		}
-	}
-}
-
-#
-#  To create a dbm users file, do:
-#
-#   cat test.users | rlm_dbm_parser -f /etc/raddb/users_db
-#
-#  Then add 'dbm' in 'authorize' section.
-#
-#  Note that even if the file has a ".db" or ".dbm" extension,
-#  you may have to specify it here without that extension.  This
-#  is because the DBM libraries "helpfully" add a ".db" to the
-#  filename, but don't check if it's already there.
-#
-dbm {
-	usersfile = ${confdir}/users_db
-}
-
-# Instantiate a couple instances of the idn module
-idn {
-}
-
-# ...more commonly known as...
-idn idna {
-}
-
-idn idna_lenient {
-	UseSTD3ASCIIRules = no
-}
diff --git a/src/test/setup/radius-config/freeradius/hints b/src/test/setup/radius-config/freeradius/hints
deleted file mode 120000
index f45fc9e..0000000
--- a/src/test/setup/radius-config/freeradius/hints
+++ /dev/null
@@ -1 +0,0 @@
-mods-config/preprocess/hints
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/huntgroups b/src/test/setup/radius-config/freeradius/huntgroups
deleted file mode 120000
index c2d27ff..0000000
--- a/src/test/setup/radius-config/freeradius/huntgroups
+++ /dev/null
@@ -1 +0,0 @@
-mods-config/preprocess/huntgroups
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-available/README.rst b/src/test/setup/radius-config/freeradius/mods-available/README.rst
deleted file mode 100644
index 8ffb764..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/README.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-Modules in Version 3
-====================
-
-As of Version 3, all of the modules have been places in the
-"mods-available/" directory.  This practice follows that used by other
-servers such as Nginx, Apache, etc.  The "modules" directory should
-not be used.
-
-Modules are enabled by creating a file in the mods-enabled/ directory.
-You can also create a soft-link from one directory to another::
-
-  $ cd raddb/mods-enabled
-  $ ln -s ../mods-available/foo
-
-This will enable module "foo".  Be sure that you have configured the
-module correctly before enabling it, otherwise the server will not
-start.  You can verify the server configuration by running
-"radiusd -XC".
-
-A large number of modules are enabled by default.  This allows the
-server to work with the largest number of authentication protocols.
-Please be careful when disabling modules.  You will likely need to
-edit the "sites-enabled/" files to remove references to any disabled
-modules.
-
-Conditional Modules
--------------------
-
-Version 3 allows modules to be conditionally loaded.  This is useful
-when you want to have a virtual server which references a module, but
-does not require it.  Instead of editing the virtual server file, you
-can just conditionally enable the module.
-
-Modules are conditionally enabled by adding a "-" before their name in
-a virtual server.  For example, you can do::
-
-  server {
-    authorize {
-      ...
-      ldap
-      -sql
-      ...
-    }
-  }
-
-This says "require the LDAP module, but use the SQL module only if it
-is configured."
-
-This feature is not very useful for production configurations.  It is,
-however, very useful for the default examples that ship with the
-server.
-
-Ignoring module
----------------
-
-If you see this message::
-
-  Ignoring module (see raddb/mods-available/README.rst)
-
-Then you are in the right place.  Most of the time this message can be
-ignored.  The message can be fixed by find the references to "-module"
-in the virtual server, and deleting them.
-
-Another way to fix it is to configure the module, as described above.
-
-Simplification
---------------
-
-Allowing conditional modules simplifies the default virtual servers
-that are shipped with FreeRADIUS.  This means that if you want to
-enable LDAP (for example), you no longer need to edit the files in
-raddb/sites-available/ in order to enable it.
-
-Instead, you should edit the raddb/mods-available/ldap file to point
-to your local LDAP server.  Then, enable the module via the soft-link
-method described above.
-
-Once the module is enabled, it will automatically be used in the
-default configuration.
diff --git a/src/test/setup/radius-config/freeradius/mods-available/always b/src/test/setup/radius-config/freeradius/mods-available/always
deleted file mode 100644
index bba5b79..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/always
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- text -*-
-#
-#  $Id: de3f13089d8951f4c822ebc4007df58e0487de14 $
-
-#
-#  The "always" module is here for debugging purposes, or
-#  for use in complex policies.
-#  Instance simply returns the same result, always, without
-#  doing anything.
-#
-#  rcode may be one of the following values:
-#  - reject   - Reject the user.
-#  - fail     - Simulate or indicate a failure.
-#  - ok       - Simulate or indicate a success.
-#  - handled  - Indicate that the request has been handled,
-#               stop processing, and send response if set.
-#  - invalid  - Indicate that the request is invalid.
-#  - userlock - Indicate that the user account has been
-#               locked out.
-#  - notfound - Indicate that a user account can't be found.
-#  - noop     - Simulate a no-op.
-#  - updated  - Indicate that the request has been updated.
-#
-#  If an instance is listed in a session {}  section, 
-#  this simulates a user having <integer> sessions.
-#  
-#  simulcount = <integer>
-#
-#  If an instance is listed in a session {}  section, 
-#  this simulates the user having multilink
-#  sessions.
-#
-#  mpp = <integer>
-#
-always reject {
-	rcode = reject
-}
-always fail {
-	rcode = fail
-}
-always ok {
-	rcode = ok
-}
-always handled {
-	rcode = handled
-}
-always invalid {
-	rcode = invalid
-}
-always userlock {
-	rcode = userlock
-}
-always notfound {
-	rcode = notfound
-}
-always noop {
-	rcode = noop
-}
-always updated {
-	rcode = updated
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/attr_filter b/src/test/setup/radius-config/freeradius/mods-available/attr_filter
deleted file mode 100644
index 360e230..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/attr_filter
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 1caff077b2429c948a04777fcd619be901ac83dc $
-
-#
-#  This file defines a number of instances of the "attr_filter" module.
-#
-
-# attr_filter - filters the attributes received in replies from
-# proxied servers, to make sure we send back to our RADIUS client
-# only allowed attributes.
-attr_filter attr_filter.post-proxy {
-	key = "%{Realm}"
-	filename = ${modconfdir}/${.:name}/post-proxy
-}
-
-# attr_filter - filters the attributes in the packets we send to
-# the RADIUS home servers.
-attr_filter attr_filter.pre-proxy {
-	key = "%{Realm}"
-	filename = ${modconfdir}/${.:name}/pre-proxy
-}
-
-# Enforce RFC requirements on the contents of Access-Reject
-# packets.  See the comments at the top of the file for
-# more details.
-#
-attr_filter attr_filter.access_reject {
-	key = "%{User-Name}"
-	filename = ${modconfdir}/${.:name}/access_reject
-}
-
-# Enforce RFC requirements on the contents of Access-Challenge
-# packets.  See the comments at the top of the file for
-# more details.
-#
-attr_filter attr_filter.access_challenge {
-	key = "%{User-Name}"
-	filename = ${modconfdir}/${.:name}/access_challenge
-}
-
-
-#  Enforce RFC requirements on the contents of the
-#  Accounting-Response packets.  See the comments at the
-#  top of the file for more details.
-#
-attr_filter attr_filter.accounting_response {
-	key = "%{User-Name}"
-	filename = ${modconfdir}/${.:name}/accounting_response
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cache b/src/test/setup/radius-config/freeradius/mods-available/cache
deleted file mode 100644
index e679a1f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/cache
+++ /dev/null
@@ -1,98 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 53f2169741ce8c7f78eb525ddc5a6fddf1dcc0cc $
-
-#
-#	A module to cache attributes.  The idea is that you can look
-#	up information in a database, and then cache it.  Repeated
-#	requests for the same information will then have the cached
-#	values added to the request.
-#
-#	The module can cache a fixed set of attributes per key.
-#	It can be listed in "authorize", "post-auth", "pre-proxy"
-#	and "post-proxy".
-#
-#	If you want different things cached for authorize and post-auth,
-#	you will need to define two instances of the "cache" module.
-#
-#	The module returns "ok" if it found a cache entry.
-#	The module returns "updated" if it added a new cache entry.
-#	The module returns "noop" if it did nothing.
-#
-cache {
-	#  The key used to index the cache.  It is dynamically expanded
-	#  at run time.
-	key = "%{User-Name}"
-
-	#  The TTL of cache entries, in seconds.  Entries older than this
-	#  will be expired.
-	#
-	#  You can set the TTL per cache entry, but adding a control
-	#  variable "Cache-TTL".  The value there will over-ride this one.
-	#  Setting a Cache-TTL of 0 means "delete this entry".
-	#
-	#  This value should be between 10 and 86400.
-	ttl = 10
-
-	#  You can flush the cache via
-	#
-	#	radmin -e "set module config cache epoch 123456789"
-	#
-	#  Where last value is a 32-bit Unix timestamp.  Cache entries
-	#  older than this are expired, and new entries added.
-	#
-	#  You should never set the "epoch" configuration item in
-	#  this file.
-
-	#  The module can also operate in status-only mode where it will
-	#  not add new cache entries, or merge existing ones.
-	#
-	#  To enable set the control attribute "Cache-Status-Only" to "yes"
-	#  The module will return "ok" if it found a cache entry.
-	#  The module will return "notfound" if it failed to find a cache entry,
-	#  or the entry had expired.
-	#
-	#  Note: expired entries will still be removed.
-
-	#  If yes the following attributes will be added to the request list:
-	#  	* Cache-Entry-Hits - The number of times this entry has been
-	#			     retrieved.
-	add_stats = no
-
-	#  The list of attributes to cache for a particular key.
-	#  Each key gets the same set of cached attributes.
-	#  The attributes are dynamically expanded at run time.
-	#
-	#  You can specify which list the attribute goes into by
-	#  prefixing the attribute name with the list.  This allows
-	#  you to update multiple lists with one configuration.
-	#
-	#  If no list is specified the default list will be updated.
-	#
-	#  The default list is specified in the same way as unlang update
-	#  stanzas. If no default list is set, it will default to the
-	#  request list.
-	#
-	#  Quoting around values determine how they're processed:
-	#  - double quoted values are xlat expanded.
-	#  - single quoted values are treated as literals.
-	#  - bare values are treated as attribute references.
-	#
-	#  The '+=' operator causes all instances of the reference to
-	#  be cached.
-	#
-	#  Attributes that are generated from processing the update section
-	#  are also added to the current request, as if there'd been a cache
-	#  hit.
-	update {
-		# [outer.]<list>:<attribute> <op> <value>
-
-		# Cache all instances of Reply-Message in the reply list
-		reply:Reply-Message += &reply:Reply-Message
-
-		# Add our own to show when the cache was last updated
-		reply:Reply-Message += "Cache last updated at %t"
-
-		reply:Class := "%{randstr:ssssssssssssssssssssssssssssssss}"
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cache_eap b/src/test/setup/radius-config/freeradius/mods-available/cache_eap
deleted file mode 100644
index e9a3aed..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/cache_eap
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-#	Cache EAP responses for resiliency on intermediary proxy fail-over
-#
-cache cache_eap {
-	key = "%{%{control:State}:-%{%{reply:State}:-%{State}}}"
-
-	ttl = 15
-
-	update reply {
-		reply: += &reply:
-		control:State := &request:State
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/chap b/src/test/setup/radius-config/freeradius/mods-available/chap
deleted file mode 100644
index 97d965b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/chap
+++ /dev/null
@@ -1,11 +0,0 @@
-# -*- text -*-
-#
-#  $Id: e2a3cd3b110ffffdbcff86c7fc65a9275ddc3379 $
-
-# CHAP module
-#
-#  To authenticate requests containing a CHAP-Password attribute.
-#
-chap {
-	# no configuration
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/counter b/src/test/setup/radius-config/freeradius/mods-available/counter
deleted file mode 100644
index 54a1e00..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/counter
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- text -*-
-#
-#  $Id: a5ac1e60ef117a2c59ace1a9d061d8f70d1da538 $
-
-#  counter module:
-#  This module takes an attribute (count-attribute).
-#  It also takes a key, and creates a counter for each unique
-#  key.  The count is incremented when accounting packets are
-#  received by the server.  The value of the increment depends
-#  on the attribute type.
-#  If the attribute is Acct-Session-Time or of an integer type we add
-#  the value of the attribute. If it is anything else we increase the
-#  counter by one.
-#
-#  The 'reset' parameter defines when the counters are all reset to
-#  zero.  It can be hourly, daily, weekly, monthly or never.
-#
-#  hourly: Reset on 00:00 of every hour
-#  daily: Reset on 00:00:00 every day
-#  weekly: Reset on 00:00:00 on sunday
-#  monthly: Reset on 00:00:00 of the first day of each month
-#
-#  It can also be user defined. It should be of the form:
-#  num[hdwm] where:
-#  h: hours, d: days, w: weeks, m: months
-#  If the letter is omitted days will be assumed. In example:
-#  reset = 10h (reset every 10 hours)
-#  reset = 12  (reset every 12 days)
-#
-#
-#  The check_name attribute defines an attribute which will be
-#  registered by the counter module and can be used to set the
-#  maximum allowed value for the counter after which the user
-#  is rejected.
-#  Something like:
-#
-#  DEFAULT Max-Daily-Session := 36000
-#          Fall-Through = 1
-#
-#  You should add the counter module in the instantiate
-#  section so that it registers check_name before the files
-#  module reads the users file.
-#
-#  If check_name is set and the user is to be rejected then we
-#  send back a Reply-Message and we log a Failure-Message in
-#  the radius.log
-#
-#  If the count attribute is Acct-Session-Time then on each
-#  login we send back the remaining online time as a
-#  Session-Timeout attribute ELSE and if the reply_name is
-#  set, we send back that attribute.  The reply_name attribute
-#  MUST be of an integer type.
-#
-#  The counter-name can also be used instead of using the check_name
-#  like below:
-#
-#  DEFAULT  Daily-Session-Time > 3600, Auth-Type = Reject
-#      Reply-Message = "You've used up more than one hour today"
-#
-#  The allowed_service_type attribute can be used to only take
-#  into account specific sessions. For example if a user first
-#  logs in through a login menu and then selects ppp there will
-#  be two sessions. One for Login-User and one for Framed-User
-#  service type. We only need to take into account the second one.
-#
-#  The module should be added in the instantiate, authorize and
-#  accounting sections.  Make sure that in the authorize
-#  section it comes after any module which sets the
-#  'check_name' attribute.
-#
-counter daily {
-	filename = ${db_dir}/db.daily
-	key = User-Name
-	count_attribute = Acct-Session-Time
-	reset = daily
-	counter_name = Daily-Session-Time
-	check_name = Max-Daily-Session
-	reply_name = Session-Timeout
-	allowed_service_type = Framed-User
-	cache_size = 5000
-}
-
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cui b/src/test/setup/radius-config/freeradius/mods-available/cui
deleted file mode 100644
index cb6fe5d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/cui
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- text -*-
-#
-#  $Id: b72aa309bfc05c2443e4bb2db061b8f33de8e359 $
-
-#
-#  Write Chargeable-User-Identity to the database.
-#
-#  Schema	raddb/sql/cui/<DB>/schema.sql
-#  Queries	raddb/sql/cui/<DB>/queries.conf
-#
-sql cuisql {
-
-	# The dialect of SQL you want to use, this should usually match
-	# the driver below.
-	#
-	# If you're using rlm_sql_null, then it should be the type of
-	# database the logged queries are going to be executed against.
-	dialect = "sqlite"
-
-	# The sub-module to use to execute queries. This should match
-	# the database you're attempting to connect to.
-	#
-	# There are CUI queries available for:
-	#    * rlm_sql_mysql
-	#    * rlm_sql_postgresql
-	#    * rlm_sql_sqlite
-	#    * rlm_sql_null (log queries to disk)
-	#
-	driver = "rlm_sql_${dialect}"
-
-	sqlite {
-		filename = ${radacctdir}/cui.sqlite
-		bootstrap = ${modconfdir}/${..:name}/cui/sqlite/schema.sql
-	}
-
-	# Write CUI queries to a logfile. Useful for debugging.
-#	logfile = ${logdir}/cuilog.sql
-
-	pool {
-		start = 5
-		min = 4
-		max = 10
-		spare = 3
-		uses = 0
-		lifetime = 0
-		idle_timeout = 60
-	}
-
-	cui_table = "cui"
-	sql_user_name = "%{User-Name}"
-
-	$INCLUDE ${modconfdir}/${.:name}/cui/${dialect}/queries.conf
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/date b/src/test/setup/radius-config/freeradius/mods-available/date
deleted file mode 100644
index bd4737d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/date
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-#  Registers xlat to convert between time formats.
-#
-#  xlat input string is an attribute name. If this attribute is of date
-#  or integer type, the date xlat will convert it to a time string in
-#  the format of the format config item.
-#
-#  If the attribute is a string type, date will attempt to parse it in
-#  the format specified by the format config item, and will expand
-#  to a Unix timestamp.
-#
-date {
-	format = "%b %e %Y %H:%M:%S %Z"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/detail b/src/test/setup/radius-config/freeradius/mods-available/detail
deleted file mode 100644
index e3cc38a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/detail
+++ /dev/null
@@ -1,93 +0,0 @@
-# -*- text -*-
-#
-#  $Id: f235eb9a0ab4de42f773f3aea3810d1dcde99bd1 $
-
-# Write a detailed log of all accounting records received.
-#
-detail {
-	#  Note that we do NOT use NAS-IP-Address here, as
-	#  that attribute MAY BE from the originating NAS, and
-	#  NOT from the proxy which actually sent us the
-	#  request.
-	#
-	#  The following line creates a new detail file for
-	#  every radius client (by IP address or hostname).
-	#  In addition, a new detail file is created every
-	#  day, so that the detail file doesn't have to go
-	#  through a 'log rotation'
-	#
-	#  If your detail files are large, you may also want
-	#  to add a ':%H' (see doc/variables.txt) to the end
-	#  of it, to create a new detail file every hour, e.g.:
-	#
-	#   ..../detail-%Y%m%d:%H
-	#
-	#  This will create a new detail file for every hour.
-	#
-	#  If you are reading detail files via the "listen" section
-	#  (e.g. as in raddb/sites-available/robust-proxy-accounting),
-	#  you MUST use a unique directory for each combination of a
-	#  detail file writer, and reader.  That is, there can only
-	#  be ONE "listen" section reading detail files from a
-	#  particular directory.
-	#
-	filename = ${radacctdir}/%{%{Packet-Src-IP-Address}:-%{Packet-Src-IPv6-Address}}/detail-%Y%m%d
-
-	#
-	#  If you are using radrelay, delete the above line for "file",
-	#  and use this one instead:
-	#
-#	filename = ${radacctdir}/detail
-
-	#
-	#  The Unix-style permissions on the 'detail' file.
-	#
-	#  The detail file often contains secret or private
-	#  information about users.  So by keeping the file
-	#  permissions restrictive, we can prevent unwanted
-	#  people from seeing that information.
-	permissions = 0600
-
-	# The Unix group of the log file.
-	#
-	# The user that the server runs as must be in the specified
-	# system group otherwise this will fail to work.
-	#
-#	group = ${security.group}
-
-	#
-	#  Every entry in the detail file has a header which
-	#  is a timestamp.  By default, we use the ctime
-	#  format (see "man ctime" for details).
-	#
-	#  The header can be customised by editing this
-	#  string.  See "doc/variables.txt" for a description
-	#  of what can be put here.
-	#
-	header = "%t"
-
-	#
-	#  Uncomment this line if the detail file reader will be
-	#  reading this detail file.
-	#
-#	locking = yes
-
-	#
-	#  Log the Packet src/dst IP/port.  This is disabled by
-	#  default, as that information isn't used by many people.
-	#
-#	log_packet_header = yes
-
-	#
-	# Certain attributes such as User-Password may be
-	# "sensitive", so they should not be printed in the
-	# detail file.  This section lists the attributes
-	# that should be suppressed.
-	#
-	# The attributes should be listed one to a line.
-	#
-	#suppress {
-		# User-Password
-	#}
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/detail.example.com b/src/test/setup/radius-config/freeradius/mods-available/detail.example.com
deleted file mode 100644
index 745e1f1..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/detail.example.com
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- text -*-
-#
-#  Detail file writer, used in the following examples:
-#
-#	raddb/sites-available/robust-proxy-accounting
-#	raddb/sites-available/decoupled-accounting
-#
-#  Note that this module can write detail files that are read by
-#  only ONE "listen" section.  If you use BOTH of the examples
-#  above, you will need to define TWO "detail" modules.
-#
-#  e.g. detail1.example.com && detail2.example.com
-#
-#
-#  We write *multiple* detail files here.  They will be processed by
-#  the detail "listen" section in the order that they were created.
-#  The directory containing these files should NOT be used for any
-#  other purposes.  i.e. It should have NO other files in it.
-#
-#  Writing multiple detail enables the server to process the pieces
-#  in smaller chunks.  This helps in certain catastrophic corner cases.
-#
-#  $Id: 827cdf57e70dc2ff2252016194f4bb846eecead2 $
-#
-detail detail.example.com {
-	filename = ${radacctdir}/detail.example.com/detail-%Y%m%d:%H:%G
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dhcp b/src/test/setup/radius-config/freeradius/mods-available/dhcp
deleted file mode 100644
index d4e9c85..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/dhcp
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- text -*-
-#
-#  $Id: a4316335d7f73b37ec5aa9278de91d37dd28eddc $
-
-#
-#  This module is useful only for 'xlat'.  To use it,
-#  put 'dhcp' into the 'instantiate' section.
-#
-#  %{dhcp_options:<Attribute-ref>} may be used to decode
-#  DHCP options data included in RADIUS packets by vendors
-#  of DHCP to RADIUS gateways.
-#
-#  This is known to work with the following VSAs:
-#	* Juniper		- ERX-Dhcp-Options
-#	* Alcatel lucent SR	- Alc-ToServer-Dhcp-Options
-#				- Alc-ToClient-Dhcp-Options
-#
-dhcp {
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool b/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool
deleted file mode 100644
index ed04d73..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool
+++ /dev/null
@@ -1,36 +0,0 @@
-##  Configuration for DHCP to use SQL IP Pools.
-##
-##  See sqlippool.conf for common configuration explanation
-##
-##  $Id: 8d459c06a16b77eff7b976e32838dbc1195d901f $
-
-sqlippool dhcp_sqlippool {
-	sql_instance_name = "sql"
-
-	ippool_table = "radippool"
-
-	lease_duration = 7200
-
-	# Client's MAC address is mapped to Calling-Station-Id in policy.conf
-	pool_key = "%{Calling-Station-Id}"
-
-	# For now, it works with MySQL.
-	$INCLUDE ${modconfdir}/sql/ippool-dhcp/mysql/queries.conf
-
-	# It may also work with sqlite - this is very experimental.
-	# Comment out the above line and add the following include.
-	# To use sqlite you need to add '%' to safe_characters in
-	# raddb/mods-config/sql/main/sqlite/queries.conf.
-	# $INCLUDE ${modconfdir}/sql/ippool-dhcp/sqlite/queries.conf
-
-	sqlippool_log_exists = "DHCP: Existing IP: %{reply:Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-	sqlippool_log_success = "DHCP: Allocated IP: %{reply:Framed-IP-Address} from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-	sqlippool_log_clear = "DHCP: Released IP %{Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} user %{User-Name})"
-
-	sqlippool_log_failed = "DHCP: IP Allocation FAILED from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-	sqlippool_log_nopool = "DHCP: No Pool-Name defined (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/digest b/src/test/setup/radius-config/freeradius/mods-available/digest
deleted file mode 100644
index af52017..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/digest
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- text -*-
-#
-#  $Id: f0aa9edf9da33d63fe03e7d1ed3cbca848eec54d $
-
-#
-#  The 'digest' module currently has no configuration.
-#
-#  "Digest" authentication against a Cisco SIP server.
-#  See 'doc/rfc/draft-sterman-aaa-sip-00.txt' for details
-#  on performing digest authentication for Cisco SIP servers.
-#
-digest {
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients b/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients
deleted file mode 100644
index c5c9c8a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- text -*-
-#
-#  $Id: cc2bd5fd22aa473b98af5dde3fac7a66e39a9e9d $
-
-# This module loads RADIUS clients as needed, rather than when the server
-# starts.
-#
-#  There are no configuration entries for this module.  Instead, it
-#  relies on the "client" configuration.  You must:
-#
-#	1) link raddb/sites-enabled/dynamic_clients to
-#	   raddb/sites-available/dynamic_clients
-#
-#	2) Define a client network/mask (see top of the above file)
-#
-#	3) uncomment the "directory" entry in that client definition
-#
-#	4) list "dynamic_clients" in the "authorize" section of the
-#	   "dynamic_clients' virtual server.  The default example already
-#	   does this.
-#
-#	5) put files into the above directory, one per IP.
-#	   e.g. file "192.0.2.1" should contain a normal client definition
-#	   for a client with IP address 192.0.2.1.
-#
-#  For more documentation, see the file:
-#
-#	raddb/sites-available/dynamic-clients
-#
-dynamic_clients {
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/eap b/src/test/setup/radius-config/freeradius/mods-available/eap
deleted file mode 100644
index 800c50b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/eap
+++ /dev/null
@@ -1,785 +0,0 @@
-# -*- text -*-
-##
-##  eap.conf -- Configuration for EAP types (PEAP, TTLS, etc.)
-##
-##	$Id: 0fffa886244eb9cfce13103d551b7a30f6538802 $
-
-#######################################################################
-#
-#  Whatever you do, do NOT set 'Auth-Type := EAP'.  The server
-#  is smart enough to figure this out on its own.  The most
-#  common side effect of setting 'Auth-Type := EAP' is that the
-#  users then cannot use ANY other authentication method.
-#
-#  EAP types NOT listed here may be supported via the "eap2" module.
-#  See experimental.conf for documentation.
-#
-eap {
-	#  Invoke the default supported EAP type when
-	#  EAP-Identity response is received.
-	#
-	#  The incoming EAP messages DO NOT specify which EAP
-	#  type they will be using, so it MUST be set here.
-	#
-	#  For now, only one default EAP type may be used at a time.
-	#
-	#  If the EAP-Type attribute is set by another module,
-	#  then that EAP type takes precedence over the
-	#  default type configured here.
-	#
-	default_eap_type = TLS
-	#  A list is maintained to correlate EAP-Response
-	#  packets with EAP-Request packets.  After a
-	#  configurable length of time, entries in the list
-	#  expire, and are deleted.
-	#
-	timer_expire     = 60
-
-	#  There are many EAP types, but the server has support
-	#  for only a limited subset.  If the server receives
-	#  a request for an EAP type it does not support, then
-	#  it normally rejects the request.  By setting this
-	#  configuration to "yes", you can tell the server to
-	#  instead keep processing the request.  Another module
-	#  MUST then be configured to proxy the request to
-	#  another RADIUS server which supports that EAP type.
-	#
-	#  If another module is NOT configured to handle the
-	#  request, then the request will still end up being
-	#  rejected.
-	ignore_unknown_eap_types = no
-
-	# Cisco AP1230B firmware 12.2(13)JA1 has a bug.  When given
-	# a User-Name attribute in an Access-Accept, it copies one
-	# more byte than it should.
-	#
-	# We can work around it by configurably adding an extra
-	# zero byte.
-	cisco_accounting_username_bug = no
-
-	#
-	#  Help prevent DoS attacks by limiting the number of
-	#  sessions that the server is tracking.  For simplicity,
-	#  this is taken from the "max_requests" directive in
-	#  radiusd.conf.
-	max_sessions = ${max_requests}
-
-	# Supported EAP-types
-
-	#
-	#  We do NOT recommend using EAP-MD5 authentication
-	#  for wireless connections.  It is insecure, and does
-	#  not provide for dynamic WEP keys.
-	#
-	mschapv2 {
-	}
-
-	#
-	# EAP-pwd -- secure password-based authentication
-	#
-#	pwd {
-#		group = 19
-
-		#
-#		server_id = theserver@example.com
-
-		#  This has the same meaning as for TLS.
-#		fragment_size = 1020
-
-		# The virtual server which determines the
-		# "known good" password for the user.
-		# Note that unlike TLS, only the "authorize"
-		# section is processed.  EAP-PWD requests can be
-		# distinguished by having a User-Name, but
-		# no User-Password, CHAP-Password, EAP-Message, etc.
-#		virtual_server = "inner-tunnel"
-#	}
-
-	# Cisco LEAP
-	#
-	#  We do not recommend using LEAP in new deployments.  See:
-	#  http://www.securiteam.com/tools/5TP012ACKE.html
-	#
-	#  Cisco LEAP uses the MS-CHAP algorithm (but not
-	#  the MS-CHAP attributes) to perform it's authentication.
-	#
-	#  As a result, LEAP *requires* access to the plain-text
-	#  User-Password, or the NT-Password attributes.
-	#  'System' authentication is impossible with LEAP.
-	#
-	leap {
-	}
-
-	#  Generic Token Card.
-	#
-	#  Currently, this is only permitted inside of EAP-TTLS,
-	#  or EAP-PEAP.  The module "challenges" the user with
-	#  text, and the response from the user is taken to be
-	#  the User-Password.
-	#
-	#  Proxying the tunneled EAP-GTC session is a bad idea,
-	#  the users password will go over the wire in plain-text,
-	#  for anyone to see.
-	#
-	gtc {
-		#  The default challenge, which many clients
-		#  ignore..
-		#challenge = "Password: "
-
-		#  The plain-text response which comes back
-		#  is put into a User-Password attribute,
-		#  and passed to another module for
-		#  authentication.  This allows the EAP-GTC
-		#  response to be checked against plain-text,
-		#  or crypt'd passwords.
-		#
-		#  If you say "Local" instead of "PAP", then
-		#  the module will look for a User-Password
-		#  configured for the request, and do the
-		#  authentication itself.
-		#
-		auth_type = PAP
-	}
-
-	## Common TLS configuration for TLS-based EAP types
-	#
-	#  See raddb/certs/README for additional comments
-	#  on certificates.
-	#
-	#  If OpenSSL was not found at the time the server was
-	#  built, the "tls", "ttls", and "peap" sections will
-	#  be ignored.
-	#
-	#  If you do not currently have certificates signed by
-	#  a trusted CA you may use the 'snakeoil' certificates.
-	#  Included with the server in raddb/certs.
-	#
-	#  If these certificates have not been auto-generated:
-	#    cd raddb/certs
-	#    make
-	#
-	#  These test certificates SHOULD NOT be used in a normal
-	#  deployment.  They are created only to make it easier
-	#  to install the server, and to perform some simple
-	#  tests with EAP-TLS, TTLS, or PEAP.
-	#
-	#  See also:
-	#
-	#  http://www.dslreports.com/forum/remark,9286052~mode=flat
-	#
-	#  Note that you should NOT use a globally known CA here!
-	#  e.g. using a Verisign cert as a "known CA" means that
-	#  ANYONE who has a certificate signed by them can
-	#  authenticate via EAP-TLS!  This is likely not what you want.
-	tls-config tls-common {
-		#private_key_password = 
-		private_key_file = ${certdir}/server.pem
-
-		#  If Private key & Certificate are located in
-		#  the same file, then private_key_file &
-		#  certificate_file must contain the same file
-		#  name.
-		#
-		#  If ca_file (below) is not used, then the
-		#  certificate_file below MUST include not
-		#  only the server certificate, but ALSO all
-		#  of the CA certificates used to sign the
-		#  server certificate.
-		certificate_file = ${certdir}/server.pem
-		#  Trusted Root CA list
-		#
-		#  ALL of the CA's in this list will be trusted
-		#  to issue client certificates for authentication.
-		#
-		#  In general, you should use self-signed
-		#  certificates for 802.1x (EAP) authentication.
-		#  In that case, this CA file should contain
-		#  *one* CA certificate.
-		#
-		#  This parameter is used only for EAP-TLS,
-		#  when you issue client certificates.  If you do
-		#  not use client certificates, and you do not want
-		#  to permit EAP-TLS authentication, then delete
-		#  this configuration item.
-		ca_file = ${cadir}/ca.pem
-
-		#
-		#  If OpenSSL supports TLS-PSK, then we can use
-		#  a PSK identity and (hex) password.  When the
-		#  following two configuration items are specified,
-		#  then certificate-based configuration items are
-		#  not allowed.  e.g.:
-		#
-		#	#private_key_password
-		#	private_key_file
-		#	certificate_file
-		#	ca_file
-		#	ca_path
-		#
-		#  For now, the identity is fixed, and must be the
-		#  same on the client.  The passphrase must be a hex
-		#  value, and can be up to 256 hex digits.
-		#
-		#  Future versions of the server may be able to
-		#  look up the shared key (hexphrase) based on the
-		#  identity.
-		#
-	#	psk_identity = "test"
-	#	psk_hexphrase = "036363823"
-
-		#
-		#  For DH cipher suites to work, you have to
-		#  run OpenSSL to create the DH file first:
-		#
-		#  	openssl dhparam -out certs/dh 1024
-		#
-		dh_file = ${certdir}/dh
-
-		#
-		#  If your system doesn't have /dev/urandom,
-		#  you will need to create this file, and
-		#  periodically change its contents.
-		#
-		#  For security reasons, FreeRADIUS doesn't
-		#  write to files in its configuration
-		#  directory.
-		#
-#		random_file = ${certdir}/random
-
-		#
-		#  This can never exceed the size of a RADIUS
-		#  packet (4096 bytes), and is preferably half
-		#  that, to accommodate other attributes in
-		#  RADIUS packet.  On most APs the MAX packet
-		#  length is configured between 1500 - 1600
-		#  In these cases, fragment size should be
-		#  1024 or less.
-		#
-	#	fragment_size = 1024
-
-		#  include_length is a flag which is
-		#  by default set to yes If set to
-		#  yes, Total Length of the message is
-		#  included in EVERY packet we send.
-		#  If set to no, Total Length of the
-		#  message is included ONLY in the
-		#  First packet of a fragment series.
-		#
-	#	include_length = yes
-
-		#  Check the Certificate Revocation List
-		#
-		#  1) Copy CA certificates and CRLs to same directory.
-		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
-		#    'c_rehash' is OpenSSL's command.
-		#  3) uncomment the line below.
-		#  5) Restart radiusd
-	#	check_crl = yes
-		ca_path = ${cadir}
-
-	       #
-	       #  If check_cert_issuer is set, the value will
-	       #  be checked against the DN of the issuer in
-	       #  the client certificate.  If the values do not
-	       #  match, the certificate verification will fail,
-	       #  rejecting the user.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-Issuer attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
-
-	       #
-	       #  If check_cert_cn is set, the value will
-	       #  be xlat'ed and checked against the CN
-	       #  in the client certificate.  If the values
-	       #  do not match, the certificate verification
-	       #  will fail rejecting the user.
-	       #
-	       #  This check is done only if the previous
-	       #  "check_cert_issuer" is not set, or if
-	       #  the check succeeds.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-CN attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#	check_cert_cn = %{User-Name}
-	#
-		# Set this option to specify the allowed
-		# TLS cipher suites.  The format is listed
-		# in "man 1 ciphers".
-		cipher_list = "DEFAULT"
-
-		#
-
-		#
-		#  Elliptical cryptography configuration
-		#
-		#  Only for OpenSSL >= 0.9.8.f
-		#
-		ecdh_curve = "prime256v1"
-
-		#
-		#  Session resumption / fast reauthentication
-		#  cache.
-		#
-		#  The cache contains the following information:
-		#
-		#  session Id - unique identifier, managed by SSL
-		#  User-Name  - from the Access-Accept
-		#  Stripped-User-Name - from the Access-Request
-		#  Cached-Session-Policy - from the Access-Accept
-		#
-		#  The "Cached-Session-Policy" is the name of a
-		#  policy which should be applied to the cached
-		#  session.  This policy can be used to assign
-		#  VLANs, IP addresses, etc.  It serves as a useful
-		#  way to re-apply the policy from the original
-		#  Access-Accept to the subsequent Access-Accept
-		#  for the cached session.
-		#
-		#  On session resumption, these attributes are
-		#  copied from the cache, and placed into the
-		#  reply list.
-		#
-		#  You probably also want "use_tunneled_reply = yes"
-		#  when using fast session resumption.
-		#
-		cache {
-		      #
-		      #  Enable it.  The default is "no".
-		      #  Deleting the entire "cache" subsection
-		      #  Also disables caching.
-		      #
-		      #  You can disallow resumption for a
-		      #  particular user by adding the following
-		      #  attribute to the control item list:
-		      #
-		      #		Allow-Session-Resumption = No
-		      #
-		      #  If "enable = no" below, you CANNOT
-		      #  enable resumption for just one user
-		      #  by setting the above attribute to "yes".
-		      #
-		      enable = yes
-
-		      #
-		      #  Lifetime of the cached entries, in hours.
-		      #  The sessions will be deleted after this
-		      #  time.
-		      #
-		      lifetime = 24 # hours
-
-		      #
-		      #  The maximum number of entries in the
-		      #  cache.  Set to "0" for "infinite".
-		      #
-		      #  This could be set to the number of users
-		      #  who are logged in... which can be a LOT.
-		      #
-		      max_entries = 255
-
-		      #
-		      #  Internal "name" of the session cache.
-		      #  Used to distinguish which TLS context
-		      #  sessions belong to.
-		      #
-		      #  The server will generate a random value
-		      #  if unset. This will change across server
-		      #  restart so you MUST set the "name" if you
-		      #  want to persist sessions (see below).
-		      #
-		      #name = "EAP module"
-
-		      #
-		      #  Simple directory-based storage of sessions.
-		      #  Two files per session will be written, the SSL
-		      #  state and the cached VPs. This will persist session
-		      #  across server restarts.
-		      #
-		      #  The server will need write perms, and the directory
-		      #  should be secured from anyone else. You might want
-		      #  a script to remove old files from here periodically:
-		      #
-		      #    find ${logdir}/tlscache -mtime +2 -exec rm -f {} \;
-		      #
-		      #  This feature REQUIRES "name" option be set above.
-		      #
-		      #persist_dir = "${logdir}/tlscache"
-		}
-
-		#
-		#  As of version 2.1.10, client certificates can be
-		#  validated via an external command.  This allows
-		#  dynamic CRLs or OCSP to be used.
-		#
-		#  This configuration is commented out in the
-		#  default configuration.  Uncomment it, and configure
-		#  the correct paths below to enable it.
-		#
-		verify {
-			#  A temporary directory where the client
-			#  certificates are stored.  This directory
-			#  MUST be owned by the UID of the server,
-			#  and MUST not be accessible by any other
-			#  users.  When the server starts, it will do
-			#  "chmod go-rwx" on the directory, for
-			#  security reasons.  The directory MUST
-			#  exist when the server starts.
-			#
-			#  You should also delete all of the files
-			#  in the directory when the server starts.
-	#     		tmpdir = /tmp/radiusd
-
-			#  The command used to verify the client cert.
-			#  We recommend using the OpenSSL command-line
-			#  tool.
-			#
-			#  The ${..ca_path} text is a reference to
-			#  the ca_path variable defined above.
-			#
-			#  The %{TLS-Client-Cert-Filename} is the name
-			#  of the temporary file containing the cert
-			#  in PEM format.  This file is automatically
-			#  deleted by the server when the command
-			#  returns.
-	#    		client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}"
-		}
-
-		#
-		#  OCSP Configuration
-		#  Certificates can be verified against an OCSP
-		#  Responder. This makes it possible to immediately
-		#  revoke certificates without the distribution of
-		#  new Certificate Revocation Lists (CRLs).
-		#
-		ocsp {
-		      #
-		      #  Enable it.  The default is "no".
-		      #  Deleting the entire "ocsp" subsection
-		      #  Also disables ocsp checking
-		      #
-		      enable = no
-
-		      #
-		      #  The OCSP Responder URL can be automatically
-		      #  extracted from the certificate in question.
-		      #  To override the OCSP Responder URL set
-		      #  "override_cert_url = yes".
-		      #
-		      override_cert_url = yes
-
-		      #
-		      #  If the OCSP Responder address is not
-		      #  extracted from the certificate, the
-		      #  URL can be defined here.
-
-		      #
-		      #  Limitation: Currently the HTTP
-		      #  Request is not sending the "Host: "
-		      #  information to the web-server.  This
-		      #  can be a problem if the OCSP
-		      #  Responder is running as a vhost.
-		      #
-		      url = "http://127.0.0.1/ocsp/"
-
-		      #
-		      # If the OCSP Responder can not cope with nonce
-		      # in the request, then it can be disabled here.
-		      #
-		      # For security reasons, disabling this option
-		      # is not recommended as nonce protects against
-		      # replay attacks.
-		      #
-		      # Note that Microsoft AD Certificate Services OCSP
-		      # Responder does not enable nonce by default. It is
-		      # more secure to enable nonce on the responder than
-		      # to disable it in the query here.
-		      # See http://technet.microsoft.com/en-us/library/cc770413%28WS.10%29.aspx
-		      #
-		      # use_nonce = yes
-
-		      #
-		      # Number of seconds before giving up waiting
-		      # for OCSP response. 0 uses system default.
-		      #
-		      # timeout = 0
-
-		      #
-		      # Normally an error in querying the OCSP
-		      # responder (no response from server, server did
-		      # not understand the request, etc) will result in
-		      # a validation failure.
-		      #
-		      # To treat these errors as 'soft' failures and
-		      # still accept the certificate, enable this
-		      # option.
-		      #
-		      # Warning: this may enable clients with revoked
-		      # certificates to connect if the OCSP responder
-		      # is not available. Use with caution.
-		      #
-		      # softfail = no
-		}
-	}
-
-	## EAP-TLS
-	#
-	#  As of Version 3.0, the TLS configuration for TLS-based
-	#  EAP types is above in the "tls-config" section.
-	#
-	tls {
-		# Point to the common TLS configuration
-		tls = tls-common
-
-		cipher_list = "DEFAULT"
-		#
-		# As part of checking a client certificate, the EAP-TLS
-		# sets some attributes such as TLS-Client-Cert-CN. This
-		# virtual server has access to these attributes, and can
-		# be used to accept or reject the request.
-		#
-	#	virtual_server = check-eap-tls
-	}
-
-
-	## EAP-TTLS
-	#
-	#  The TTLS module implements the EAP-TTLS protocol,
-	#  which can be described as EAP inside of Diameter,
-	#  inside of TLS, inside of EAP, inside of RADIUS...
-	#
-	#  Surprisingly, it works quite well.
-	#
-	ttls {
-		#  Which tls-config section the TLS negotiation parameters
-		#  are in - see EAP-TLS above for an explanation.
-		#
-		#  In the case that an old configuration from FreeRADIUS
-		#  v2.x is being used, all the options of the tls-config
-		#  section may also appear instead in the 'tls' section
-		#  above. If that is done, the tls= option here (and in
-		#  tls above) MUST be commented out.
-		#
-		tls = tls-common
-
-		#  The tunneled EAP session needs a default EAP type
-		#  which is separate from the one for the non-tunneled
-		#  EAP module.  Inside of the TTLS tunnel, we recommend
-		#  using EAP-MD5.  If the request does not contain an
-		#  EAP conversation, then this configuration entry is
-		#  ignored.
-		#
-		default_eap_type = mschapv2
-
-		#  The tunneled authentication request does not usually
-		#  contain useful attributes like 'Calling-Station-Id',
-		#  etc.  These attributes are outside of the tunnel,
-		#  and normally unavailable to the tunneled
-		#  authentication request.
-		#
-		#  By setting this configuration entry to 'yes',
-		#  any attribute which is NOT in the tunneled
-		#  authentication request, but which IS available
-		#  outside of the tunnel, is copied to the tunneled
-		#  request.
-		#
-		#  allowed values: {no, yes}
-		#
-		copy_request_to_tunnel = no
-
-		#  The reply attributes sent to the NAS are usually
-		#  based on the name of the user 'outside' of the
-		#  tunnel (usually 'anonymous').  If you want to send
-		#  the reply attributes based on the user name inside
-		#  of the tunnel, then set this configuration entry to
-		#  'yes', and the reply to the NAS will be taken from
-		#  the reply to the tunneled request.
-		#
-		#  allowed values: {no, yes}
-		#
-		use_tunneled_reply = no
-
-		#
-		#  The inner tunneled request can be sent
-		#  through a virtual server constructed
-		#  specifically for this purpose.
-		#
-		#  If this entry is commented out, the inner
-		#  tunneled request will be sent through
-		#  the virtual server that processed the
-		#  outer requests.
-		#
-		virtual_server = "inner-tunnel"
-
-		#  This has the same meaning, and overwrites, the
-		#  same field in the "tls" configuration, above.
-		#  The default value here is "yes".
-		#
-	#	include_length = yes
-
-		#
-		# Unlike EAP-TLS, EAP-TTLS does not require a client
-		# certificate. However, you can require one by setting the
-		# following option. You can also override this option by
-		# setting
-		#
-		#	EAP-TLS-Require-Client-Cert = Yes
-		#
-		# in the control items for a request.
-		#
-	#	require_client_cert = yes
-	}
-
-
-	## EAP-PEAP
-	#
-
-	##################################################
-	#
-	#  !!!!! WARNINGS for Windows compatibility  !!!!!
-	#
-	##################################################
-	#
-	#  If you see the server send an Access-Challenge,
-	#  and the client never sends another Access-Request,
-	#  then
-	#
-	#		STOP!
-	#
-	#  The server certificate has to have special OID's
-	#  in it, or else the Microsoft clients will silently
-	#  fail.  See the "scripts/xpextensions" file for
-	#  details, and the following page:
-	#
-	#	http://support.microsoft.com/kb/814394/en-us
-	#
-	#  For additional Windows XP SP2 issues, see:
-	#
-	#	http://support.microsoft.com/kb/885453/en-us
-	#
-	#
-	#  If is still doesn't work, and you're using Samba,
-	#  you may be encountering a Samba bug.  See:
-	#
-	#	https://bugzilla.samba.org/show_bug.cgi?id=6563
-	#
-	#  Note that we do not necessarily agree with their
-	#  explanation... but the fix does appear to work.
-	#
-	##################################################
-
-	#
-	#  The tunneled EAP session needs a default EAP type
-	#  which is separate from the one for the non-tunneled
-	#  EAP module.  Inside of the TLS/PEAP tunnel, we
-	#  recommend using EAP-MS-CHAPv2.
-	#
-	peap {
-		#  Which tls-config section the TLS negotiation parameters
-		#  are in - see EAP-TLS above for an explanation.
-		#
-		#  In the case that an old configuration from FreeRADIUS
-		#  v2.x is being used, all the options of the tls-config
-		#  section may also appear instead in the 'tls' section
-		#  above. If that is done, the tls= option here (and in
-		#  tls above) MUST be commented out.
-		#
-		tls = tls-common
-
-		#  The tunneled EAP session needs a default
-		#  EAP type which is separate from the one for
-		#  the non-tunneled EAP module.  Inside of the
-		#  PEAP tunnel, we recommend using MS-CHAPv2,
-		#  as that is the default type supported by
-		#  Windows clients.
-		#
-		default_eap_type = mschapv2
-
-		#  The PEAP module also has these configuration
-		#  items, which are the same as for TTLS.
-		#
-		copy_request_to_tunnel = no
-		use_tunneled_reply = no
-
-		#  When the tunneled session is proxied, the
-		#  home server may not understand EAP-MSCHAP-V2.
-		#  Set this entry to "no" to proxy the tunneled
-		#  EAP-MSCHAP-V2 as normal MSCHAPv2.
-		#
-	#	proxy_tunneled_request_as_eap = yes
-
-		#
-		#  The inner tunneled request can be sent
-		#  through a virtual server constructed
-		#  specifically for this purpose.
-		#
-		#  If this entry is commented out, the inner
-		#  tunneled request will be sent through
-		#  the virtual server that processed the
-		#  outer requests.
-		#
-		virtual_server = "inner-tunnel"
-
-		# This option enables support for MS-SoH
-		# see doc/SoH.txt for more info.
-		# It is disabled by default.
-		#
-	#	soh = yes
-
-		#
-		# The SoH reply will be turned into a request which
-		# can be sent to a specific virtual server:
-		#
-	#	soh_virtual_server = "soh-server"
-
-		#
-		# Unlike EAP-TLS, PEAP does not require a client certificate.
-		# However, you can require one by setting the following
-		# option. You can also override this option by setting
-		#
-		#	EAP-TLS-Require-Client-Cert = Yes
-		#
-		# in the control items for a request.
-		#
-	#	require_client_cert = yes
-	}
-
-	#
-	#  This takes no configuration.
-	#
-	#  Note that it is the EAP MS-CHAPv2 sub-module, not
-	#  the main 'mschap' module.
-	#
-	#  Note also that in order for this sub-module to work,
-	#  the main 'mschap' module MUST ALSO be configured.
-	#
-	#  This module is the *Microsoft* implementation of MS-CHAPv2
-	#  in EAP.  There is another (incompatible) implementation
-	#  of MS-CHAPv2 in EAP by Cisco, which FreeRADIUS does not
-	#  currently support.
-	#
-	mschapv2 {
-		#  Prior to version 2.1.11, the module never
-		#  sent the MS-CHAP-Error message to the
-		#  client.  This worked, but it had issues
-		#  when the cached password was wrong.  The
-		#  server *should* send "E=691 R=0" to the
-		#  client, which tells it to prompt the user
-		#  for a new password.
-		#
-		#  The default is to behave as in 2.1.10 and
-		#  earlier, which is known to work.  If you
-		#  set "send_error = yes", then the error
-		#  message will be sent back to the client.
-		#  This *may* help some clients work better,
-		#  but *may* also cause other clients to stop
-		#  working.
-		#
-#		send_error = no
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/echo b/src/test/setup/radius-config/freeradius/mods-available/echo
deleted file mode 100644
index c21a8ff..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/echo
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- text -*-
-#
-#  $Id: ad3e15933f9e85c5566810432a5fec8f23d877c1 $
-
-#
-#  This is a more general example of the execute module.
-#
-#  This one is called "echo".
-#
-#  Attribute-Name = `%{echo:/path/to/program args}`
-#
-#  If you wish to execute an external program in more than
-#  one section (e.g. 'authorize', 'pre_proxy', etc), then it
-#  is probably best to define a different instance of the
-#  'exec' module for every section.
-#
-#  The return value of the program run determines the result
-#  of the exec instance call as follows:
-#  (See doc/configurable_failover for details)
-#
-#  < 0 : fail      the module failed
-#  = 0 : ok        the module succeeded
-#  = 1 : reject    the module rejected the user
-#  = 2 : fail      the module failed
-#  = 3 : ok        the module succeeded
-#  = 4 : handled   the module has done everything to handle the request
-#  = 5 : invalid   the user's configuration entry was invalid
-#  = 6 : userlock  the user was locked out
-#  = 7 : notfound  the user was not found
-#  = 8 : noop      the module did nothing
-#  = 9 : updated   the module updated information in the request
-#  > 9 : fail      the module failed
-#
-exec echo {
-	#
-	#  Wait for the program to finish.
-	#
-	#  If we do NOT wait, then the program is "fire and
-	#  forget", and any output attributes from it are ignored.
-	#
-	#  If we are looking for the program to output
-	#  attributes, and want to add those attributes to the
-	#  request, then we MUST wait for the program to
-	#  finish, and therefore set 'wait=yes'
-	#
-	# allowed values: {no, yes}
-	wait = yes
-
-	#
-	#  The name of the program to execute, and it's
-	#  arguments.  Dynamic translation is done on this
-	#  field, so things like the following example will
-	#  work.
-	#
-	program = "/bin/echo %{User-Name}"
-
-	#
-	#  The attributes which are placed into the
-	#  environment variables for the program.
-	#
-	#  Allowed values are:
-	#
-	#	request		attributes from the request
-	#	config		attributes from the configuration items list
-	#	reply		attributes from the reply
-	#	proxy-request	attributes from the proxy request
-	#	proxy-reply	attributes from the proxy reply
-	#
-	#  Note that some attributes may not exist at some
-	#  stages.  e.g. There may be no proxy-reply
-	#  attributes if this module is used in the
-	#  'authorize' section.
-	#
-	input_pairs = request
-
-	#
-	#  Where to place the output attributes (if any) from
-	#  the executed program.  The values allowed, and the
-	#  restrictions as to availability, are the same as
-	#  for the input_pairs.
-	#
-	output_pairs = reply
-
-	#
-	#  When to execute the program.  If the packet
-	#  type does NOT match what's listed here, then
-	#  the module does NOT execute the program.
-	#
-	#  For a list of allowed packet types, see
-	#  the 'dictionary' file, and look for VALUEs
-	#  of the Packet-Type attribute.
-	#
-	#  By default, the module executes on ANY packet.
-	#  Un-comment out the following line to tell the
-	#  module to execute only if an Access-Accept is
-	#  being sent to the NAS.
-	#
-	#packet_type = Access-Accept
-
-	#
-	#  Should we escape the environment variables?
-	#
-	#  If this is set, all the RADIUS attributes
-	#  are capitalised and dashes replaced with
-	#  underscores. Also, RADIUS values are surrounded
-	#  with double-quotes.
-	#
-	#  That is to say: User-Name=BobUser => USER_NAME="BobUser"
-	shell_escape = yes
-
-	#
-	#  How long should we wait for the program to finish?
-	#
-	#  Default is 10 seconds, which should be plenty for nearly
-	#  anything. Range is 1 to 30 seconds. You are strongly
-	#  encouraged to NOT increase this value. Decreasing can
-	#  be used to cause authentication to fail sooner when you
-	#  know it's going to fail anyway due to the time taken,
-	#  thereby saving resources.
-	#
-	#timeout = 10
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/etc_group b/src/test/setup/radius-config/freeradius/mods-available/etc_group
deleted file mode 100644
index 6aea41b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/etc_group
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- text -*-
-#
-#  $Id: f58b72f560ba067991d67295b546691bcd992d44 $
-
-#  "passwd" configuration, for the /etc/group file. Adds a Etc-Group-Name
-#  attribute for every group that the user is member of.
-#
-#  You will have to define the Etc-Group-Name in the 'dictionary' file
-#  as a 'string' type.
-#
-#  The Group and Group-Name attributes are automatically created by
-#  the Unix module, and do checking against /etc/group automatically.
-#  This means that you CANNOT use Group or Group-Name to do any other
-#  kind of grouping in the server.  You MUST define a new group
-#  attribute.
-#
-#  i.e. this module should NOT be used as-is, but should be edited to
-#  point to a different group file.
-#
-passwd etc_group {
-	filename = /etc/group
-	format = "=Etc-Group-Name:::*,User-Name"
-	hash_size = 50
-	ignore_nislike = yes
-	allow_multiple_keys = yes
-	delimiter = ":"
-}
-
diff --git a/src/test/setup/radius-config/freeradius/mods-available/exec b/src/test/setup/radius-config/freeradius/mods-available/exec
deleted file mode 100644
index 470b9cb..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/exec
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 5f21e4350f091ed51813865a31b2796c4b487f9f $
-
-#
-#  Execute external programs
-#
-#  This module is useful only for 'xlat'.  To use it,
-#  put 'exec' into the 'instantiate' section.  You can then
-#  do dynamic translation of attributes like:
-#
-#  Attribute-Name = `%{exec:/path/to/program args}`
-#
-#  The value of the attribute will be replaced with the output
-#  of the program which is executed.  Due to RADIUS protocol
-#  limitations, any output over 253 bytes will be ignored.
-#
-#  The RADIUS attributes from the user request will be placed
-#  into environment variables of the executed program, as
-#  described in "man unlang" and in doc/variables.txt
-#
-#  See also "echo" for more sample configuration.
-#
-exec {
-	wait = no
-	input_pairs = request
-	shell_escape = yes
-	output = none
-	timeout = 10
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/expiration b/src/test/setup/radius-config/freeradius/mods-available/expiration
deleted file mode 100644
index dfc0550..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/expiration
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 5d06454d0a8ccce7f50ddf7b01ba01c4ace6560a $
-
-#
-# The expiration module. This handles the Expiration attribute
-# It should be included in the *end* of the authorize section
-# in order to handle user Expiration. It should also be included
-# in the instantiate section in order to register the Expiration
-# compare function
-#
-expiration {
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/expr b/src/test/setup/radius-config/freeradius/mods-available/expr
deleted file mode 100644
index ab7de2d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/expr
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 1e130ef24a4dbcd55f347ebd799a8b2bf4f3333a $
-
-#
-#  This module is useful only for 'xlat'.  To use it,
-#  put 'expr' into the 'instantiate' section.  You can then
-#  do dynamic translation of attributes like:
-#
-#  Attribute-Name = `%{expr:2 + 3 + %{exec: uid -u}}`
-#
-#  The value of the attribute will be replaced with the output
-#  of the program which is executed.  Due to RADIUS protocol
-#  limitations, any output over 253 bytes will be ignored.
-#
-#  The module also registers a few paircompare functions, and
-#  many string manipulation functions.
-#
-
-expr {
-	#
-	# Characters that will not be encoded by the %{encode}
-	# xlat function.
-	#
-	safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/files b/src/test/setup/radius-config/freeradius/mods-available/files
deleted file mode 100644
index 0e92702..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/files
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- text -*-
-#
-#  $Id: c14992f05c13983fa0c0f9bcead4c1cf0c1bb801 $
-
-# Livingston-style 'users' file
-#
-# See "man users" for more information.
-#
-files {
-	# Search for files in a subdirectory of mods-config which
-	# matches this instance of the files module.
-	moddir = ${modconfdir}/${.:instance}
-
-	# The default key attribute to use for matches.  The content
-	# of this attribute is used to match the "name" of the
-	# entry.
-	#key = "%{%{Stripped-User-Name}:-%{User-Name}}"
-
-	#  The old "users" style file is now located here.
-	filename = ${moddir}/authorize
-
-	#  This is accepted for backwards compatibility
-	#  It will be removed in a future release.
-	usersfile = ${moddir}/authorize
-
-	#  These are accepted for backwards compatibility.
-	#  They will be renamed in a future release.
-	acctusersfile = ${moddir}/accounting
-	preproxy_usersfile = ${moddir}/pre-proxy
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/idn b/src/test/setup/radius-config/freeradius/mods-available/idn
deleted file mode 100644
index 31874c5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/idn
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 534054077d52a7bb0bf8e02c1e861e5c86b76df9 $
-
-#
-# Internationalised domain names.
-#
-
-#  The expansion string: %{idn: example.com} results in an ASCII
-#  punycode version of the domain name.  That version can then be used
-#  for name comparisons.  Using an i18n version of the name is NOT
-#  RECOMMENDED, as that version is not canonical.
-#
-#  i.e. the "same" domain name can be represented in many, many,
-#  different ways.  Only the idn version has *one* representation.
-#
-idn {
-	#
-	#  Allow use of unassigned Unicode code points.
-	#
-	allow_unassigned = no
-
-	#
-	#  Prohibit underscores and other invalid characters in domain
-	#  names.
-	use_std3_ascii_rules = yes
-
-}
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-available/inner-eap b/src/test/setup/radius-config/freeradius/mods-available/inner-eap
deleted file mode 100644
index 9eed1ce..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/inner-eap
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 9a690b77c2eaea1086d9748012c380283714f452 $
-
-#
-#  Sample configuration for an EAP module that occurs *inside*
-#  of a tunneled method.  It is used to limit the EAP types that
-#  can occur inside of the inner tunnel.
-#
-#  See also raddb/sites-available/inner-tunnel
-#
-#  See raddb/mods-available/eap for full documentation on the meaning of these
-#  configuration entries.
-#
-eap inner-eap {
-	# This is the best choice for PEAP.
-	default_eap_type = mschapv2
-
-	timer_expire     = 60
-
-	#  This should be the same as the outer eap "max sessions"
-	max_sessions = 2048
-
-	# Supported EAP-types
-	md5 {
-	}
-
-	gtc {
-		#  The default challenge, which many clients
-		#  ignore..
-		#challenge = "Password: "
-
-		auth_type = PAP
-	}
-
-	mschapv2 {
-		# See eap for documentation
-#		send_error = no
-	}
-
-	# No TTLS or PEAP configuration should be listed here.
-
-	## EAP-TLS
-	#
-	#  You SHOULD use different certificates than are used
-	#  for the outer EAP configuration!
-	#
-	#  Support for PEAP/TLS and RFC 5176 TLS/TLS is experimental.
-	#  It might work, or it might not.
-	#
-	tls {
-		private_key_password = whatever
-		private_key_file = ${certdir}/inner-server.pem
-
-		#  If Private key & Certificate are located in
-		#  the same file, then private_key_file &
-		#  certificate_file must contain the same file
-		#  name.
-		#
-		#  If ca_file (below) is not used, then the
-		#  certificate_file below MUST include not
-		#  only the server certificate, but ALSO all
-		#  of the CA certificates used to sign the
-		#  server certificate.
-		certificate_file = ${certdir}/inner-server.pem
-
-		#  You may want different CAs for inner and outer
-		#  certificates.  If so, edit this file.
-		ca_file = ${cadir}/ca.pem
-
-		cipher_list = "DEFAULT"
-
-		#  You may want to set a very small fragment size.
-		#  The TLS data here needs to go inside of the
-		#  outer EAP-TLS protocol.
-		#
-		#  Try values and see if they work...
-	#	fragment_size = 1024
-
-		#  Other needful things
-		dh_file = ${certdir}/dh
-		random_file = ${certdir}/random
-
-		#  CRL and OCSP things go here.  See the main "eap"
-		#  file for details.
-	#	check_crl = yes
-	#	ca_path = /path/to/directory/with/ca_certs/and/crls/
-
-		#
-		#  The session resumption / fast re-authentication
-		#  cache CANNOT be used for inner sessions.
-		#
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ippool b/src/test/setup/radius-config/freeradius/mods-available/ippool
deleted file mode 100644
index 8b263bd..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/ippool
+++ /dev/null
@@ -1,66 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 1d3305ba45ec71336f55f8f1db05f183772e1b82 $
-
-#  Do server side ip pool management. Should be added in
-#  post-auth and accounting sections.
-#
-#  The module also requires the existence of the Pool-Name
-#  attribute. That way the administrator can add the Pool-Name
-#  attribute in the user profiles and use different pools for
-#  different users. The Pool-Name attribute is a *check* item
-#  not a reply item.
-#
-#  The Pool-Name should be set to the ippool module instance
-#  name or to DEFAULT to match any module.
-
-#
-# Example:
-# radiusd.conf: ippool students { [...] }
-#		ippool teachers { [...] }
-# users file  : DEFAULT Group == students, Pool-Name := "students"
-#		DEFAULT Group == teachers, Pool-Name := "teachers"
-#		DEFAULT	Group == other, Pool-Name := "DEFAULT"
-#
-# Note: If you change the range parameters you must then erase the
-#       db files.
-#
-ippool main_pool {
-	#  The main db file used to allocate addresses.
-	filename = ${db_dir}/db.ippool
-
-	#  The start and end ip addresses for this pool.
-	range_start = 192.0.2.1
-	range_stop = 192.0.2.254
-
-	#  The network mask used for this pool.
-	netmask = 255.255.255.0
-
-	#  The gdbm cache size for the db files. Should
-	#  be equal to the number of ip's available in
-	#  the ip pool
-	cache_size = 800
-
-	#  Helper db index file used in multilink
-	ip_index = ${db_dir}/db.ipindex
-
-	#  If set, the Framed-IP-Address already in the
-	#  reply (if any) will be discarded, and replaced
-	#  ith a Framed-IP-Address assigned here.
-	override = no
-
-	#  Specifies the maximum time in seconds that an
-	#  entry may be active.  If set to zero, means
-	#  "no timeout".  The default value is 0
-	maximum_timeout = 0
-
-	#  The key to use for the session database (which
-	#  holds the allocated ip's) normally it should
-	#  just be the nas ip/port (which is the default).
-	#
-	#  If your NAS sends the same value of NAS-Port
-	#  all requests, the key should be based on some
-	#  other attribute that is in ALL requests, AND
-	#  is unique to each machine needing an IP address.
-#	key = "%{NAS-IP-Address} %{NAS-Port}"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/krb5 b/src/test/setup/radius-config/freeradius/mods-available/krb5
deleted file mode 100644
index eaadd9f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/krb5
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- text -*-
-#
-#  $Id: d17b8b8fb8b442869e4aff143d345168875c55c8 $
-
-#
-#  Kerberos.  See doc/rlm_krb5 for minimal docs.
-#
-krb5 {
-	keytab = /path/to/keytab
-	service_principal = name_of_principle
-
-	#  Pool of krb5 contexts, this allows us to make the module multithreaded
-	#  and to avoid expensive operations like resolving and opening keytabs
-	#  on every request.  It may also allow TCP connections to the KDC to be
-	#  cached if that is supported by the version of libkrb5 used.
-	#
-	#  The context pool is only used if the underlying libkrb5 reported
-	#  that it was thread safe at compile time.
-	pool {
-		# Number of contexts to create
-		start = 10
-
-		# Minimum number of contexts to keep available
-		min = 4
-
-		# Maximum number of contexts
-		#
-		# If these contexts are all in use and a new one
-		# is requested, the request will NOT get a connection.
-		max = 10
-
-		# Spare contexts to be left idle
-		#
-		# NOTE: Idle contexts WILL be closed if "idle_timeout"
-		# is set.
-		spare = 3
-
-		# Number of uses before the context is freed
-		# 0 means "infinite"
-		uses = 0
-
-		# The lifetime (in seconds) of the context
-		lifetime = 0
-
-		# idle timeout (in seconds).  A context which is
-		# unused for this length of time will be freed.
-		idle_timeout = 60
-
-		# NOTE: All configuration settings are enforced.  If a
-		# context is closed because of "idle_timeout",
-		# "uses", or "lifetime", then the total number of
-		# contexts MAY fall below "min".  When that
-		# happens, it will create a new context.  It will
-		# also log a WARNING message.
-		#
-		# The solution is to either lower the "min" contexts,
-		# or increase lifetime/idle_timeout.
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ldap b/src/test/setup/radius-config/freeradius/mods-available/ldap
deleted file mode 100644
index 8b9e667..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/ldap
+++ /dev/null
@@ -1,468 +0,0 @@
-# -*- text -*-
-#
-#  $Id: af3f155ff51f4ebe7bfaffcb55a23238f128e843 $
-
-#
-#  Lightweight Directory Access Protocol (LDAP)
-#
-ldap {
-	#  Note that this needs to match the name(s) in the LDAP server
-	#  certificate, if you're using ldaps.  See OpenLDAP documentation
-	#  for the behavioral semantics of specifying more than one host.
-	server = "ldap.rrdns.example.org ldap.rrdns.example.org ldap.example.org"
-
-	#  Port to connect on, defaults to 389. Setting this to 636 will enable
-	#  LDAPS if start_tls (see below) is not able to be used.
-#	port = 389
-
-	#  Administrator account for searching and possibly modifying.
-#	identity = "cn=admin,dc=example,dc=org"
-#	password = mypass
-
-	#  Unless overridden in another section, the dn from which all
-	#  searches will start from.
-#	base_dn = "dc=example,dc=org"
-
-	#
-	#  Generic valuepair attribute
-	#
-	
-	#  If set, this will attribute will be retrieved in addition to any
-	#  mapped attributes.
-	#
-	#  Values should be in the format:
-	#  	<radius attr> <op> <value>
-	#
-	#  Where:
-	#  	<radius attr>:	Is the attribute you wish to create
-	# 			with any valid list and request qualifiers.
-	#  	<op>: 		Is any assignment attribute (=, :=, +=, -=).
-	#  	<value>:	Is the value to parse into the new valuepair.
-	# 			If the attribute name is wrapped in double
-	# 			quotes it will be xlat expanded.
-#	valuepair_attribute = "radiusAttribute"
-
-	#
-	#  Mapping of LDAP directory attributes to RADIUS dictionary attributes.
-	#
-	
-	#  WARNING: Although this format is almost identical to the unlang
-	#  update section format, it does *NOT* mean that you can use other
-	#  unlang constructs in module configuration files.
-	#
-	#  Configuration items are in the format:
-	# 	<radius attr> <op> <ldap attr>
-	#
-	#  Where:
-	#  	<radius attr>:	Is the destination RADIUS attribute
-	# 			with any valid list and request qualifiers.
-	#  	<op>: 		Is any assignment attribute (=, :=, +=, -=).
-	#  	<ldap attr>:	Is the attribute associated with user or
-	#			profile objects in the LDAP directory.
-	# 			If the attribute name is wrapped in double
-	# 			quotes it will be xlat expanded.
-	#
-	#  Request and list qualifiers may also be placed after the 'update'
-	#  section name to set defaults destination requests/lists
-	#  for unqualified RADIUS attributes.
-	#
-	#  Note: LDAP attribute names should be single quoted unless you want
-	#  the name value to be derived from an xlat expansion, or an
-	#  attribute ref.
-	update {
-		control:Password-With-Header	+= 'userPassword'
-#		control:NT-Password		:= 'ntPassword'
-#		reply:Reply-Message		:= 'radiusReplyMessage'
-#		reply:Tunnel-Type		:= 'radiusTunnelType'
-#		reply:Tunnel-Medium-Type	:= 'radiusTunnelMediumType'
-#		reply:Tunnel-Private-Group-ID	:= 'radiusTunnelPrivategroupId'
-
-		#  These are provided for backwards compatibility.
-		#  Where only a list is specified as the RADIUS attribute,
-		#  the value of the LDAP attribute is parsed as a valuepair
-		#  in the same format as the 'valuepair_attribute' (above).
-#		control:			+= 'radiusCheckAttributes'
-#		reply:				+= 'radiusReplyAttributes'
-	}
-
-	#  Set to yes if you have eDirectory and want to use the universal
-	#  password mechanism.
-#	edir = no
-
-	#  Set to yes if you want to bind as the user after retrieving the
-	#  Cleartext-Password. This will consume the login grace, and
-	#  verify user authorization.
-#	edir_autz = no
-
-	#  Note: set_auth_type was removed in v3.x.x
-	#  Equivalent functionality can be achieved by adding the following
-	#  stanza to the authorize {} section of your virtual server.
-	#
-	#    ldap
-	#    if ((ok || updated) && User-Password) {
-	#        update {
-	#            control:Auth-Type := ldap
-	#        }
-	#    }
-	
-	#
-	#  User object identification.
-	#
-	user {
-		#  Where to start searching in the tree for users
-		base_dn = "${..base_dn}"
-
-		#  Filter for user objects, should be specific enough
-		#  to identify a single user object.
-		filter = "(uid=%{%{Stripped-User-Name}:-%{User-Name}})"
-
-		#  Search scope, may be 'base', 'one', sub' or 'children'
-#		scope = 'sub'
-
-		#  If this is undefined, anyone is authorised.
-		#  If it is defined, the contents of this attribute
-		#  determine whether or not the user is authorised
-#		access_attribute = "dialupAccess"
-
-		#  Control whether the presence of "access_attribute"
-		#  allows access, or denys access.
-		#
-		#  If "yes", and the access_attribute is present, or
-		#  "no" and the access_attribute is absent then access
-		#  will be allowed.
-		#
-		#  If "yes", and the access_attribute is absent, or
-		#  "no" and the access_attribute is present, then
-		#  access will not be allowed.
-		#
-		#  If the value of the access_attribute is "false", it
-		#  will negate the result.
-		#
-		#  e.g.
-		#    access_positive = yes
-		#    access_attribute = userAccessAllowed
-		#
-		#    userAccessAllowed = false
-		#
-		#  Will result in the user being locked out.
-#		access_positive = yes
-	}
-
-	#
-	#  User membership checking.
-	#
-	group {
-		#  Where to start searching in the tree for groups
-		base_dn = "${..base_dn}"
-
-		#  Filter for group objects, should match all available
-		#  group objects a user might be a member of.
-		filter = "(objectClass=posixGroup)"
-
-		# Search scope, may be 'base', 'one', sub' or 'children'
-#		scope = 'sub'
-
-		#  Attribute that uniquely identifies a group.
-		#  Is used when converting group DNs to group
-		#  names.
-#		name_attribute = cn
-
-		#  Filter to find group objects a user is a member of.
-		#  That is, group objects with attributes that
-		#  identify members (the inverse of membership_attribute).
-#		membership_filter = "(|(member=%{control:Ldap-UserDn})(memberUid=%{%{Stripped-User-Name}:-%{User-Name}}))"
-
-		#  The attribute in user objects which contain the names
-		#  or DNs of groups a user is a member of.
-		#
-		#  Unless a conversion between group name and group DN is
-		#  needed, there's no requirement for the group objects
-		#  referenced to actually exist.
-		membership_attribute = "memberOf"
-
-		#  If cacheable_name or cacheable_dn are enabled,
-		#  all group information for the user will be
-		#  retrieved from the directory and written to LDAP-Group
-		#  attributes appropriate for the instance of rlm_ldap.
-		#
-		#  For group comparisons these attributes will be checked
-		#  instead of querying the LDAP directory directly.
-		#
-		#  This feature is intended to be used with rlm_cache.
-		#
-		#  If you wish to use this feature, you should enable
-		#  the type that matches the format of your check items
-		#  i.e. if your groups are specified as DNs then enable
-		#  cacheable_dn else enable cacheable_name.
-#		cacheable_name = "no"
-#		cacheable_dn = "no"
-
-		#  Override the normal cache attribute (<inst>-LDAP-Group)
-		#  and create a custom attribute.  This can help if multiple
-		#  module instances are used in fail-over.
-#		cache_attribute = "LDAP-Cached-Membership"
-	}
-
-	#
-	#  User profiles. RADIUS profile objects contain sets of attributes
-	#  to insert into the request. These attributes are mapped using
-	#  the same mapping scheme applied to user objects.
-	#
-	profile {
-		#  Filter for RADIUS profile objects
-#		filter = "(objectclass=radiusprofile)"
-
-		#  The default profile applied to all users.
-#		default = "cn=radprofile,dc=example,dc=org"
-
-		#  The list of profiles which are applied (after the default)
-		#  to all users.
-		#  The "User-Profile" attribute in the control list
-		#  will override this setting at run-time.
-#		attribute = "radiusProfileDn"
-	}
-
-	#
-	#  Bulk load clients from the directory
-	#
-	client {
-		#   Where to start searching in the tree for clients
-		base_dn = "${..base_dn}"
-
-		#
-		#  Filter to match client objects
-		#
-		filter = '(objectClass=frClient)'
-
-		# Search scope, may be 'base', 'one', 'sub' or 'children'
-#		scope = 'sub'
-
-		#
-		#  Client attribute mappings are in the format:
-		#      <client attribute> = <ldap attribute>
-		#
-		#  Arbitrary attributes (accessible by %{client:<attr>}) are not yet supported.
-		#
-		#  The following attributes are required:
-		#    * identifier - IPv4 address, or IPv4 address with prefix, or hostname.
-		#    * secret - RADIUS shared secret.
-		#
-		#  The following attributes are optional:
-		#    * shortname - Friendly name associated with the client
-		#    * nas_type - NAS Type
-		#    * virtual_server - Virtual server to associate the client with
-		#    * require_message_authenticator - Whether we require the Message-Authenticator
-		#      attribute to be present in requests from the client.
-		#
-		#  Schemas are available in doc/schemas/ldap for openldap and eDirectory
-		#
-		attribute {
-			identifier			= 'radiusClientIdentifier'
-			secret				= 'radiusClientSecret'
-#			shortname			= 'radiusClientShortname'
-#			nas_type			= 'radiusClientType'
-#			virtual_server			= 'radiusClientVirtualServer'
-#			require_message_authenticator	= 'radiusClientRequireMa'
-		}
-	}
-
-	#  Load clients on startup
-#	read_clients = no
-
-	#
-	#  Modify user object on receiving Accounting-Request
-	#
-
-	#  Useful for recording things like the last time the user logged
-	#  in, or the Acct-Session-ID for CoA/DM.
-	#
-	#  LDAP modification items are in the format:
-	# 	<ldap attr> <op> <value>
-	#
-	#  Where:
-	#  	<ldap attr>:	The LDAP attribute to add modify or delete.
-	#  	<op>: 		One of the assignment operators:
-	#			(:=, +=, -=, ++).
-	#			Note: '=' is *not* supported.
-	#  	<value>:	The value to add modify or delete.
-	#
-	#  WARNING: If using the ':=' operator with a multi-valued LDAP
-	#  attribute, all instances of the attribute will be removed and
-	#  replaced with a single attribute.
-	accounting {
-		reference = "%{tolower:type.%{Acct-Status-Type}}"
-
-		type {
-			start {
-				update {
-					description := "Online at %S"
-				}
-			}
-
-			interim-update {
-				update {
-					description := "Last seen at %S"
-				}
-			}
-
-			stop {
-				update {
-					description := "Offline at %S"
-				}
-			}
-		}
-	}
-
-	#
-	#  Post-Auth can modify LDAP objects too
-	#
-	post-auth {
-		update {
-			description := "Authenticated at %S"
-		}
-	}
-
-	#
-	#  LDAP connection-specific options.
-	#
-	#  These options set timeouts, keep-alives, etc. for the connections.
-	#
-	options {
-		#
-		#  The following two configuration items are for Active Directory
-		#  compatibility.  If you set these to "no", then searches
-		#  will likely return "operations error", instead of a
-		#  useful result.
-		#
-		chase_referrals = yes
-		rebind = yes
-
-		#  Seconds to wait for LDAP query to finish. default: 20
-		timeout = 10
-
-		#  Seconds LDAP server has to process the query (server-side
-		#  time limit). default: 20
-		#
-		#  LDAP_OPT_TIMELIMIT is set to this value.
-		timelimit = 3
-
-		#  Seconds to wait for response of the server. (network
-		#  failures) default: 10
-		#
-		#  LDAP_OPT_NETWORK_TIMEOUT is set to this value.
-		net_timeout = 1
-
-		#  LDAP_OPT_X_KEEPALIVE_IDLE
-		idle = 60
-
-		#  LDAP_OPT_X_KEEPALIVE_PROBES
-		probes = 3
-
-		#  LDAP_OPT_X_KEEPALIVE_INTERVAL
-		interval = 3
-
-		#  ldap_debug: debug flag for LDAP SDK
-		#  (see OpenLDAP documentation).  Set this to enable
-		#  huge amounts of LDAP debugging on the screen.
-		#  You should only use this if you are an LDAP expert.
-		#
-		#	default: 0x0000 (no debugging messages)
-		#	Example:(LDAP_DEBUG_FILTER+LDAP_DEBUG_CONNS)
-		ldap_debug = 0x0028
-	}
-
-	#
-	#  This subsection configures the tls related items
-	#  that control how FreeRADIUS connects to an LDAP
-	#  server.  It contains all of the "tls_*" configuration
-	#  entries used in older versions of FreeRADIUS.  Those
-	#  configuration entries can still be used, but we recommend
-	#  using these.
-	#
-	tls {
-		# Set this to 'yes' to use TLS encrypted connections
-		# to the LDAP database by using the StartTLS extended
-		# operation.
-		#
-		# The StartTLS operation is supposed to be
-		# used with normal ldap connections instead of
-		# using ldaps (port 636) connections
-#		start_tls = yes
-
-#		ca_file	= ${certdir}/cacert.pem
-
-#		ca_path	= ${certdir}
-#		certificate_file = /path/to/radius.crt
-#		private_key_file = /path/to/radius.key
-#		random_file = ${certdir}/random
-
-		#  Certificate Verification requirements.  Can be:
-		#    "never" (don't even bother trying)
-		#    "allow" (try, but don't fail if the certificate
-		#		can't be verified)
-		#    "demand" (fail if the certificate doesn't verify.)
-		#
-		#  The default is "allow"
-#		require_cert	= "demand"
-	}
-
-
-	#  As of version 3.0, the "pool" section has replaced the
-	#  following configuration items:
-	#
-	#  ldap_connections_number
-
-	#  The connection pool is new for 3.0, and will be used in many
-	#  modules, for all kinds of connection-related activity.
-	#
-	#  When the server is not threaded, the connection pool
-	#  limits are ignored, and only one connection is used.
-	pool {
-		#  Number of connections to start
-		start = 5
-
-		#  Minimum number of connections to keep open
-		min = 4
-
-		#  Maximum number of connections
-		#
-		#  If these connections are all in use and a new one
-		#  is requested, the request will NOT get a connection.
-		#
-		#  Setting 'max' to LESS than the number of threads means
-		#  that some threads may starve, and you will see errors
-		#  like "No connections available and at max connection limit"
-		#
-		#  Setting 'max' to MORE than the number of threads means
-		#  that there are more connections than necessary.
-		max = ${thread[pool].max_servers}
-
-		#  Spare connections to be left idle
-		#
-		#  NOTE: Idle connections WILL be closed if "idle_timeout"
-		#  is set.
-		spare = 3
-
-		#  Number of uses before the connection is closed
-		#
-		#  0 means "infinite"
-		uses = 0
-
-		#  The lifetime (in seconds) of the connection
-		lifetime = 0
-
-		#  Idle timeout (in seconds).  A connection which is
-		#  unused for this length of time will be closed.
-		idle_timeout = 60
-
-		#  NOTE: All configuration settings are enforced.  If a
-		#  connection is closed because of "idle_timeout",
-		#  "uses", or "lifetime", then the total number of
-		#  connections MAY fall below "min".  When that
-		#  happens, it will open a new connection.  It will
-		#  also log a WARNING message.
-		#
-		#  The solution is to either lower the "min" connections,
-		#  or increase lifetime/idle_timeout.
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/linelog b/src/test/setup/radius-config/freeradius/mods-available/linelog
deleted file mode 100644
index d1b68bf..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/linelog
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 779752bc10c156ba1981810186a4af828a18c014 $
-
-#
-#  The "linelog" module will log one line of text to a file.
-#  Both the filename and the line of text are dynamically expanded.
-#
-#  We STRONGLY suggest that you do not use data from the
-#  packet as part of the filename.
-#
-linelog {
-	#
-	#  The file where the logs will go.
-	#
-	#  If the filename is "syslog", then the log messages will
-	#  go to syslog.
-	filename = ${logdir}/linelog
-
-	#
-	#  The Unix-style permissions on the log file.
-	#
-	#  Depending on format string, the log file may contain secret or
-	#  private information about users.  Keep the file permissions as
-	#  restrictive as possible.
-	permissions = 0600
-
-	#
-	# The Unix group which owns the log file.
-	#
-	# The user that freeradius runs as must be in the specified
-	# group, otherwise it will not be possible to set the group.
-	#
-	# group = ${security.group}
-
-	#
-	# If logging via syslog, the facility can be set here. Otherwise
-	# the syslog_facility option in radiusd.conf will be used.
-	#
-	# syslog_facility = daemon
-
-	#
-	#  The default format string.
-	format = "This is a log message for %{User-Name}"
-
-	#
-	#  This next line can be omitted.  If it is omitted, then
-	#  the log message is static, and is always given by "format",
-	#  above.
-	#
-	#  If it is defined, then the string is dynamically expanded,
-	#  and the result is used to find another configuration entry
-	#  here, with the given name.  That name is then used as the
-	#  format string.
-	#
-	#  If the configuration entry cannot be found, then no log
-	#  message is printed.
-	#
-	#  i.e. You can have many log messages in one "linelog" module.
-	#  If this two-step expansion did not exist, you would have
-	#  needed to configure one "linelog" module for each log message.
-
-	#
-	#  Reference the Packet-Type (Access-Request, etc.)  If it doesn't
-	#  exist, reference the "format" entry, above.
-	reference = "messages.%{%{Packet-Type}:-default}"
-
-	#
-	#  The messages defined here are taken from the "reference"
-	#  expansion, above.
-	#
-	messages {
-		default = "Unknown packet type %{Packet-Type}"
-
-		Access-Request = "Requested access: %{User-Name}"
-		Access-Reject = "Rejected access: %{User-Name}"
-		Access-Challenge = "Sent challenge: %{User-Name}"
-	}
-}
-
-#
-#  Another example, for accounting packets.
-#
-linelog log_accounting {
-	#
-	#  Used if the expansion of "reference" fails.
-	#
-	format = ""
-
-	filename = ${logdir}/linelog-accounting
-
-	permissions = 0600
-
-	reference = "Accounting-Request.%{%{Acct-Status-Type}:-unknown}"
-
-	#
-	#  Another example:
-	#      
-	#
-	Accounting-Request {
-		Start = "Connect: [%{User-Name}] (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} ip %{Framed-IP-Address})"
-		Stop = "Disconnect: [%{User-Name}] (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} ip %{Framed-IP-Address}) %{Acct-Session-Time} seconds"
-
-		#  Don't log anything for these packets.
-		Alive = ""
-
-		Accounting-On = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) just came online"
-		Accounting-Off = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) just went offline"
-
-		# don't log anything for other Acct-Status-Types.
-		unknown = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) sent unknown Acct-Status-Type %{Acct-Status-Type}"
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/logintime b/src/test/setup/radius-config/freeradius/mods-available/logintime
deleted file mode 100644
index d4f6f3e..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/logintime
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 25344527759d22b49b5e990fd83f0e506442fa76 $
-
-# The logintime module. This handles the Login-Time,
-# Current-Time, and Time-Of-Day attributes.  It should be
-# included in the *end* of the authorize section in order to
-# handle Login-Time checks. It should also be included in the
-# instantiate section in order to register the Current-Time
-# and Time-Of-Day comparison functions.
-#
-# When the Login-Time attribute is set to some value, and the
-# user has been permitted to log in, a Session-Timeout is
-# calculated based on the remaining time.  See "doc/README".
-#
-logintime {
-	# The minimum timeout (in seconds) a user is allowed
-	# to have. If the calculated timeout is lower we don't
-	# allow the login. Some NAS do not handle values
-	# lower than 60 seconds well.
-	minimum_timeout = 60
-}
-
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mac2ip b/src/test/setup/radius-config/freeradius/mods-available/mac2ip
deleted file mode 100644
index 5d646af..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/mac2ip
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- text -*-
-#
-#  $Id: a4ead1d64e8220344b483718ece4712bef5e9e36 $
-
-######################################################################
-#
-#  This next section is a sample configuration for the "passwd"
-#  module, that reads flat-text files.
-#
-#  The file is in the format <mac>,<ip>
-#
-#	00:01:02:03:04:05,192.0.2.100
-#	01:01:02:03:04:05,192.0.2.101
-#	02:01:02:03:04:05,192.0.2.102
-#
-#  This lets you perform simple static IP assignments from a flat-text
-#  file.  You will have to define lease times yourself.
-#
-######################################################################
-
-passwd mac2ip {
-	filename = ${modconfdir}/${.:name}/${.:instance}
-	format = "*DHCP-Client-Hardware-Address:=DHCP-Your-IP-Address"
-	delimiter = ","
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mac2vlan b/src/test/setup/radius-config/freeradius/mods-available/mac2vlan
deleted file mode 100644
index ee8e4b3..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/mac2vlan
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- text -*-
-#
-#  $Id: a1db803a71cddbb98daeeeda515cff2fc77ea318 $
-
-#  A simple file to map a MAC address to a VLAN.
-#
-#  The file should be in the format MAC,VLAN
-#  the VLAN name cannot have spaces in it, for example:
-#
-#	00:01:02:03:04:05,VLAN1
-#	03:04:05:06:07:08,VLAN2
-#	...
-#
-passwd mac2vlan {
-	filename = ${modconfdir}/${.:name}/${.:instance}
-	format = "*VMPS-Mac:=VMPS-VLAN-Name"
-	delimiter = ","
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mschap b/src/test/setup/radius-config/freeradius/mods-available/mschap
deleted file mode 100644
index f2aa631..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/mschap
+++ /dev/null
@@ -1,106 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 2170df13dbb884fde5d596eba68056781ba3160c $
-
-# Microsoft CHAP authentication
-#
-#  This module supports MS-CHAP and MS-CHAPv2 authentication.
-#  It also enforces the SMB-Account-Ctrl attribute.
-#
-mschap {
-	#
-	#  If you are using /etc/smbpasswd, see the 'passwd'
-	#  module for an example of how to use /etc/smbpasswd
-
-	# if use_mppe is not set to no mschap will
-	# add MS-CHAP-MPPE-Keys for MS-CHAPv1 and
-	# MS-MPPE-Recv-Key/MS-MPPE-Send-Key for MS-CHAPv2
-	#
-#	use_mppe = no
-
-	# if mppe is enabled require_encryption makes
-	# encryption moderate
-	#
-#	require_encryption = yes
-
-	# require_strong always requires 128 bit key
-	# encryption
-	#
-#	require_strong = yes
-
-	# The module can perform authentication itself, OR
-	# use a Windows Domain Controller.  This configuration
-	# directive tells the module to call the ntlm_auth
-	# program, which will do the authentication, and return
-	# the NT-Key.  Note that you MUST have "winbindd" and
-	# "nmbd" running on the local machine for ntlm_auth
-	# to work.  See the ntlm_auth program documentation
-	# for details.
-	#
-	# If ntlm_auth is configured below, then the mschap
-	# module will call ntlm_auth for every MS-CHAP
-	# authentication request.  If there is a cleartext
-	# or NT hashed password available, you can set
-	# "MS-CHAP-Use-NTLM-Auth := No" in the control items,
-	# and the mschap module will do the authentication itself,
-	# without calling ntlm_auth.
-	#
-	# Be VERY careful when editing the following line!
-	#
-	# You can also try setting the user name as:
-	#
-	#	... --username=%{mschap:User-Name} ...
-	#
-	# In that case, the mschap module will look at the User-Name
-	# attribute, and do prefix/suffix checks in order to obtain
-	# the "best" user name for the request.
-	#
-#	ntlm_auth = "/path/to/ntlm_auth --request-nt-key --username=%{%{Stripped-User-Name}:-%{%{User-Name}:-None}} --challenge=%{%{mschap:Challenge}:-00} --nt-response=%{%{mschap:NT-Response}:-00}"
-
-	# The default is to wait 10 seconds for ntlm_auth to
-	# complete.  This is a long time, and if it's taking that
-	# long then you likely have other problems in your domain.
-	# The length of time can be decreased with the following
-	# option, which can save clients waiting if your ntlm_auth
-	# usually finishes quicker. Range 1 to 10 seconds.
-	#
-#	ntlm_auth_timeout = 10
-
-	passchange {
-		# This support MS-CHAPv2 (not v1) password change
-		# requests.  See doc/mschap.rst for more IMPORTANT
-		# information.
-		#
-		# Samba/ntlm_auth - if you are using ntlm_auth to
-		# validate passwords, you will need to use ntlm_auth
-		# to change passwords.  Uncomment the three lines
-		# below, and change the path to ntlm_auth.
-		#
-#		ntlm_auth = "/usr/bin/ntlm_auth --helper-protocol=ntlm-change-password-1"
-#		ntlm_auth_username = "username: %{mschap:User-Name}"
-#		ntlm_auth_domain = "nt-domain: %{mschap:NT-Domain}"
-
-		# To implement a local password change, you need to
-		# supply a string which is then expanded, so that the
-		# password can be placed somewhere.  e.g. passed to a
-		# script (exec), or written to SQL (UPDATE/INSERT).
-		# We give both examples here, but only one will be
-		# used.
-		#
-#		local_cpw = "%{exec:/path/to/script %{mschap:User-Name} %{MS-CHAP-New-Cleartext-Password}}"
-		#
-#		local_cpw = "%{sql:UPDATE radcheck set value='%{MS-CHAP-New-NT-Password}' where username='%{SQL-User-Name}' and attribute='NT-Password'}"
-	}
-
-	# For Apple Server, when running on the same machine as
-	# Open Directory.  It has no effect on other systems.
-	#
-#	use_open_directory = yes
-
-	# On failure, set (or not) the MS-CHAP error code saying
-	# "retries allowed".
-#	allow_retry = yes
-
-	# An optional retry message.
-#	retry_msg = "Re-enter (or reset) the password"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth b/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth
deleted file mode 100644
index 9ee11aa..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-#  For testing ntlm_auth authentication with PAP.
-#
-#  If you have problems with authentication failing, even when the
-#  password is good, it may be a bug in Samba:
-#
-#	https://bugzilla.samba.org/show_bug.cgi?id=6563
-#
-exec ntlm_auth {
-	wait = yes
-	program = "/path/to/ntlm_auth --request-nt-key --domain=MYDOMAIN --username=%{mschap:User-Name} --password=%{User-Password}"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/opendirectory b/src/test/setup/radius-config/freeradius/mods-available/opendirectory
deleted file mode 100644
index 10dd507..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/opendirectory
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 2a44ef695f4eaf6f1c461b3d92fda54e9b910f9e $
-
-#  This module is only used when the server is running on the same
-#  system as OpenDirectory.  The configuration of the module is hard-coded
-#  by Apple, and cannot be changed here.
-#
-#  There are no configuration entries for this module.
-#
-opendirectory {
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/otp b/src/test/setup/radius-config/freeradius/mods-available/otp
deleted file mode 100644
index 03d0262..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/otp
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-#  Configuration for the OTP module.
-#
-
-#  This module allows you to use various handheld OTP tokens
-#  for authentication (Auth-Type := otp).  These tokens are
-#  available from various vendors.
-#
-#  It works in conjunction with otpd, which implements token
-#  management and OTP verification functions; and lsmd or gsmd,
-#  which implements synchronous state management functions.
-#  otpd, lsmd and gsmd are available from TRI-D Systems:
-#              <http://www.tri-dsystems.com/>
-
-#  You must list this module in BOTH the authorize and authenticate
-#  sections in order to use it.
-otp {
-	# otpd rendezvous point.
-	# (default: /var/run/otpd/socket)
-	#otpd_rp = /var/run/otpd/socket
-
-	# Text to use for the challenge.
-	# Default "Challenge: %{reply:OTP-Challenge}\n Response: "
-
-	challenge_prompt = "Challenge: %{reply:OTP-Challenge} \n Response: "
-
-	# Length of the challenge.  Most tokens probably support a
-	# max of 8 digits.  (range: 5-32 digits, default 6)
-	#challenge_length = 6
-
-	# Maximum time, in seconds, that a challenge is valid.
-	# (The user must respond to a challenge within this time.)
-	# It is also the minimal time between consecutive async mode
-	# authentications, a necessary restriction due to an inherent
-	# weakness of the RADIUS protocol which allows replay attacks.
-	# (default: 30)
-	#challenge_delay = 30
-
-	# Whether or not to allow asynchronous ("pure" challenge/
-	# response) mode authentication.  Since sync mode is much more
-	# usable, and all reasonable tokens support it, the typical
-	# use of async mode is to allow re-sync of event based tokens.
-	# But because of the vulnerability of async mode with some tokens,
-	# you probably want to disable this and require that out-of-sync
-	# users re-sync from specifically secured terminals.
-	# See the otpd docs for more info.
-	# (default: no)
-	#allow_async = no
-
-	# Whether or not to allow synchronous mode authentication.
-	# When using otpd with lsmd, it is *CRITICALLY IMPORTANT*
-	# that if your OTP users can authenticate to multiple RADIUS
-	# servers, this must be "yes" for the primary/default server,
-	# and "no" for the others.  This is because lsmd does not
-	# share state information across multiple servers.  Using "yes"
-	# on all your RADIUS servers would allow replay attacks!
-	# Also, for event based tokens, the user will be out of sync
-	# on the "other" servers.  In order to use "yes" on all your
-	# servers, you must either use gsmd, which synchronises state
-	# globally, or implement your own state synchronisation method.
-	# (default: yes)
-	#allow_sync = yes
-
-	# If both allow_async and allow_sync are "yes", a challenge is
-	# always presented to the user.  This is incompatible with NAS
-	# that can't present or don't handle Access-Challenge's, e.g.
-	# PPTP servers.  Even though a challenge is presented, the user
-	# can still enter their synchronous passcode.
-
-	# The following are MPPE settings.  Note that MS-CHAP (v1) is
-	# strongly discouraged.  All possible values are listed as
-	# {value = meaning}.  Default values are first.
-	#mschapv2_mppe = {2 = required, 1 = optional, 0 = forbidden}
-	#mschapv2_mppe_bits = {2 = 128, 1 = 128 or 40, 0 = 40}
-	#mschap_mppe = {2 = required, 1 = optional, 0 = forbidden}
-	#mschap_mppe_bits = {2 = 128}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/pam b/src/test/setup/radius-config/freeradius/mods-available/pam
deleted file mode 100644
index a31dfda..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/pam
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- text -*-
-#
-#  $Id: f4a91a948637bb2f42f613ed9faa6f9ae9ae6099 $
-
-
-# Pluggable Authentication Modules
-#
-#  For Linux, see:
-#	http://www.kernel.org/pub/linux/libs/pam/index.html
-#
-#  WARNING: On many systems, the system PAM libraries have
-#           memory leaks!  We STRONGLY SUGGEST that you do not
-#	    use PAM for authentication, due to those memory leaks.
-#
-pam {
-	#
-	#  The name to use for PAM authentication.
-	#  PAM looks in /etc/pam.d/${pam_auth_name}
-	#  for it's configuration.  See 'redhat/radiusd-pam'
-	#  for a sample PAM configuration file.
-	#
-	#  Note that any Pam-Auth attribute set in the 'authorize'
-	#  section will over-ride this one.
-	#
-	pam_auth = radiusd
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/pap b/src/test/setup/radius-config/freeradius/mods-available/pap
deleted file mode 100644
index 1636b52..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/pap
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 0038ecd154840c71ceff33ddfdd936e4e28e0bcd $
-
-# PAP module to authenticate users based on their stored password
-#
-#  Supports multiple encryption/hash schemes.  See "man rlm_pap"
-#  for details.
-#
-#  For instructions on creating the various types of passwords, see:
-#
-#  http://www.openldap.org/faq/data/cache/347.html
-pap {
-	#  By default the server will use heuristics to try and automatically
-	#  handle base64 or hex encoded passwords. This behaviour can be
-	#  stopped by setting the following to "no".
-	normalise = yes
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/passwd b/src/test/setup/radius-config/freeradius/mods-available/passwd
deleted file mode 100644
index bf77f3a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/passwd
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 11bd2246642bf3c080327c7f4a67dc42603f3a6c $
-
-# passwd module allows to do authorization via any passwd-like
-# file and to extract any attributes from these files.
-#
-#  See the "smbpasswd" and "etc_group" files for more examples.
-#
-# parameters are:
-#   filename - path to file
-#
-#   format - format for filename record. This parameters
-#            correlates record in the passwd file and RADIUS
-#            attributes.
-#
-#            Field marked as '*' is a key field. That is, the parameter
-#            with this name from the request is used to search for
-#            the record from passwd file
-#
-#            Attributes marked as '=' are added to reply_items instead
-#            of default configure_items
-#
-#	     Attributes marked as '~' are added to request_items
-#
-#            Field marked as ',' may contain a comma separated list
-#            of attributes.
-#
-#   hash_size - hashtable size.  Setting it to 0 is no longer permitted
-#		A future version of the server will have the module
-#		automatically determine the hash size.  Having it set
-#		manually should not be necessary.
-#
-#   allow_multiple_keys - if many records for a key are allowed
-#
-#   ignore_nislike - ignore NIS-related records
-#
-#   delimiter - symbol to use as a field separator in passwd file,
-#            for format ':' symbol is always used. '\0', '\n' are
-#	     not allowed
-#
-
-#  An example configuration for using /etc/passwd.
-#
-#  This is an example which will NOT WORK if you have shadow passwords,
-#  NIS, etc.  The "unix" module is normally responsible for reading
-#  system passwords.  You should use it instead of this example.
-#
-passwd etc_passwd {
-	filename = /etc/passwd
-	format = "*User-Name:Crypt-Password:"
-	hash_size = 100
-	ignore_nislike = no
-	allow_multiple_keys = no
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/perl b/src/test/setup/radius-config/freeradius/mods-available/perl
deleted file mode 100644
index 6936a78..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/perl
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 3d9428e69c08fbc281f9830beae1cd8b7a5e5c70 $
-
-#  Persistent, embedded Perl interpreter.
-#
-perl {
-	#
-	#  The Perl script to execute on authorize, authenticate,
-	#  accounting, xlat, etc.  This is very similar to using
-	#  'rlm_exec' module, but it is persistent, and therefore
-	#  faster.
-	#
-	filename = ${modconfdir}/${.:instance}/example.pl
-
-	#
-	#  The following hashes are given to the module and
-	#  filled with value-pairs (Attribute names and values)
-	#
-	#  %RAD_CHECK		Check items
-	#  %RAD_REQUEST		Attributes from the request
-	#  %RAD_REPLY		Attributes for the reply
-	#  %RAD_REQUEST_PROXY	Attributes from the proxied request
-	#  %RAD_REQUEST_PROXY_REPLY Attributes from the proxy reply
-	#
-	#  The interface between FreeRADIUS and Perl is strings.
-	#  That is, attributes of type "octets" are converted to
-	#  printable strings, such as "0xabcdef".  If you want to
-	#  access the binary values of the attributes, you should
-	#  call the Perl "pack" function.  Then to send any binary
-	#  data back to FreeRADIUS, call the Perl "unpack" function,
-	#  so that the contents of the hashes are printable strings.
-	#
-	#  IP addresses are sent as strings, e.g. "192.0.2.25", and
-	#  not as a 4-byte binary value.  The same applies to other
-	#  attribute data types.
-	#
-	#  Attributes of type "string" are copied to Perl as-is.
-	#  They are not escaped or interpreted.
-	#
-	#  The return codes from functions in the perl_script
-	#  are passed directly back to the server.  These
-	#  codes are defined in mods-config/example.pl
-	#
-
-	# You can define configuration items (and nested sub-sections) in perl "config" section.
-	# These items will be accessible in the perl script through %RAD_PERLCONF hash.
-	# For instance: $RAD_PERLCONF{'name'} $RAD_PERLCONF{'sub-config'}->{'name'}
-	#
-	#config {
-	#	name = "value"
-	#	sub-config {
-	#		name = "value of name from config.sub-config"
-	#	}
-	#}
-	
-	#
-	#  List of functions in the module to call.
-	#  Uncomment and change if you want to use function
-	#  names other than the defaults.
-	#
-	#func_authenticate = authenticate
-	#func_authorize = authorize
-	#func_preacct = preacct
-	#func_accounting = accounting
-	#func_checksimul = checksimul
-	#func_pre_proxy = pre_proxy
-	#func_post_proxy = post_proxy
-	#func_post_auth = post_auth
-	#func_recv_coa = recv_coa
-	#func_send_coa = send_coa
-	#func_xlat = xlat
-	#func_detach = detach
-
-	#
-	#  Uncomment the following lines if you wish
-	#  to use separate functions for Start and Stop
-	#  accounting packets. In that case, the
-	#  func_accounting function is not called.
-	#
-	#func_start_accounting = accounting_start
-	#func_stop_accounting = accounting_stop
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/preprocess b/src/test/setup/radius-config/freeradius/mods-available/preprocess
deleted file mode 100644
index ae349e9..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/preprocess
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 8baec7961ba75fe52546cb1331868b0b2b1c38f4 $
-
-# Preprocess the incoming RADIUS request, before handing it off
-# to other modules.
-#
-#  This module processes the 'huntgroups' and 'hints' files.
-#  In addition, it re-writes some weird attributes created
-#  by some NAS, and converts the attributes into a form which
-#  is a little more standard.
-#
-preprocess {
-	# Search for files in a subdirectory of mods-config which
-	# matches this instance of the preprocess module.
-	moddir = ${modconfdir}/${.:instance}
-
-	huntgroups = ${moddir}/huntgroups
-	hints = ${moddir}/hints
-
-	# This hack changes Ascend's weird port numbering
-	# to standard 0-??? port numbers so that the "+" works
-	# for IP address assignments.
-	with_ascend_hack = no
-	ascend_channels_per_line = 23
-
-	# Windows NT machines often authenticate themselves as
-	# NT_DOMAIN\username
-	#
-	# If this is set to 'yes', then the NT_DOMAIN portion
-	# of the user-name is silently discarded.
-	#
-	# This configuration entry SHOULD NOT be used.
-	# See the "realms" module for a better way to handle
-	# NT domains.
-	with_ntdomain_hack = no
-
-	# Specialix Jetstream 8500 24 port access server.
-	#
-	# If the user name is 10 characters or longer, a "/"
-	# and the excess characters after the 10th are
-	# appended to the user name.
-	#
-	# If you're not running that NAS, you don't need
-	# this hack.
-	with_specialix_jetstream_hack = no
-
-	# Cisco (and Quintum in Cisco mode) sends it's VSA attributes
-	# with the attribute name *again* in the string, like:
-	#
-	#   H323-Attribute = "h323-attribute=value".
-	#
-	# If this configuration item is set to 'yes', then
-	# the redundant data in the the attribute text is stripped
-	# out.  The result is:
-	#
-	#  H323-Attribute = "value"
-	#
-	# If you're not running a Cisco or Quintum NAS, you don't
-	# need this hack.
-	with_cisco_vsa_hack = no
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/python b/src/test/setup/radius-config/freeradius/mods-available/python
deleted file mode 100644
index dcaaef2..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/python
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Make sure the PYTHONPATH environmental variable contains the
-# directory(s) for the modules listed below.
-#
-# Uncomment any func_* which are included in your module. If
-# rlm_python is called for a section which does not have
-# a function defined, it will return NOOP.
-#
-python {
-	module = example
-
-	mod_instantiate = ${.module}
-#	func_instantiate = instantiate
-
-	mod_detach = ${.module}
-#	func_detach = instantiate
-
-	mod_authorize = ${.module}
-#	func_authorize = authorize
-
-	mod_authenticate = ${.module}
-#	func_authenticate = authenticate
-
-	mod_preacct = ${.module}
-#	func_preacct = preacct
-
-	mod_accounting = ${.module}
-#	func_accounting = accounting
-
-	mod_checksimul = ${.module}
-#	func_checksimul = checksimul
-
-	mod_pre_proxy = ${.module}
-#	func_pre_proxy = pre_proxy
-
-	mod_post_proxy = ${.module}
-#	func_post_proxy = post_proxy
-
-	mod_post_auth = ${.module}
-#	func_post_auth = post_auth
-
-	mod_recv_coa = ${.module}
-#	func_recv_coa = recv_coa
-
-	mod_send_coa = ${.module}
-#	func_send_coa = send_coa
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/radutmp b/src/test/setup/radius-config/freeradius/mods-available/radutmp
deleted file mode 100644
index 8430fc1..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/radutmp
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 82319c033bbf349991a46b8f198a5bf5487b5da8 $
-
-#  Write a 'utmp' style file, of which users are currently
-#  logged in, and where they've logged in from.
-#
-#  This file is used mainly for Simultaneous-Use checking,
-#  and also 'radwho', to see who's currently logged in.
-#
-radutmp {
-	#  Where the file is stored.  It's not a log file,
-	#  so it doesn't need rotating.
-	#
-	filename = ${logdir}/radutmp
-
-	#  The field in the packet to key on for the
-	#  'user' name,  If you have other fields which you want
-	#  to use to key on to control Simultaneous-Use,
-	#  then you can use them here.
-	#
-	#  Note, however, that the size of the field in the
-	#  'utmp' data structure is small, around 32
-	#  characters, so that will limit the possible choices
-	#  of keys.
-	#
-	#  You may want instead: %{%{Stripped-User-Name}:-%{User-Name}}
-	username = %{User-Name}
-
-
-	#  Whether or not we want to treat "user" the same
-	#  as "USER", or "User".  Some systems have problems
-	#  with case sensitivity, so this should be set to
-	#  'no' to enable the comparisons of the key attribute
-	#  to be case insensitive.
-	#
-	case_sensitive = yes
-
-	#  Accounting information may be lost, so the user MAY
-	#  have logged off of the NAS, but we haven't noticed.
-	#  If so, we can verify this information with the NAS,
-	#
-	#  If we want to believe the 'utmp' file, then this
-	#  configuration entry can be set to 'no'.
-	#
-	check_with_nas = yes
-
-	# Set the file permissions, as the contents of this file
-	# are usually private.
-	permissions = 0600
-
-	caller_id = "yes"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/realm b/src/test/setup/radius-config/freeradius/mods-available/realm
deleted file mode 100644
index c1984d0..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/realm
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 0b87548aac96952f1fa07410578e482496cb75e8 $
-
-# Realm module, for proxying.
-#
-#  You can have multiple instances of the realm module to
-#  support multiple realm syntaxes at the same time.  The
-#  search order is defined by the order that the modules are listed
-#  in the authorize and preacct sections.
-#
-#  Four config options:
-#	format	 -  must be "prefix" or "suffix"
-#			  The special cases of "DEFAULT"
-#			  and "NULL" are allowed, too.
-#	delimiter      -  must be a single character
-
-#  'realm/username'
-#
-#  Using this entry, IPASS users have their realm set to "IPASS".
-realm IPASS {
-	format = prefix
-	delimiter = "/"
-}
-
-#  'username@realm'
-#
-realm suffix {
-	format = suffix
-	delimiter = "@"
-}
-
-#  'username%realm'
-#
-realm realmpercent {
-	format = suffix
-	delimiter = "%"
-}
-
-#
-#  'domain\user'
-#
-realm ntdomain {
-	format = prefix
-	delimiter = "\\"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/redis b/src/test/setup/radius-config/freeradius/mods-available/redis
deleted file mode 100644
index a47a046..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/redis
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 8750f989839fdcebfe106ef6574e8c96f93cdefa $
-
-#
-#  Configuration file for the "redis" module.  This module does nothing
-#  Other than provide connections to a redis database, and a %{redis: ...}
-#  expansion.
-#
-redis {
-	#  Host where the redis server is located.
-	#  We recommend using ONLY 127.0.0.1 !
-	server = 127.0.0.1
-
-	#  The default port.
-	port = 6379
-
-	#  The password used to authenticate to the server.
-	#  We recommend using a strong password.
-#	password = thisisreallysecretandhardtoguess
-
-	#
-	#  Information for the connection pool.  The configuration items
-	#  below are the same for all modules which use the new
-	#  connection pool.
-	#
-	pool {
-		# start this many connections
-		start = 1
-
-		# Keep at least "min" connections open
-		min = 1
-
-		# No more than "max" connections at any one time
-		max = 10
-
-		# try to keep "spare" connections
-		spare = 0
-
-		# The pool is checked for free connections every
-		# "cleanup_interval".  If there are free connections,
-		# then one of them is closed.
-		cleanup_interval = 300
-
-		# connections last no more than "lifetime" seconds.
-		lifetime = 86400
-
-		# close idle connections are "idle_timeout" seconds
-		idle_timeout = 600
-
-		# allow no more than "uses" queries through a connection.
-		# after that, close it and open a new one.
-		uses = 0
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/rediswho b/src/test/setup/radius-config/freeradius/mods-available/rediswho
deleted file mode 100644
index 0471d26..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/rediswho
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- text -*-
-#
-#  $Id: dba8c583f08db3490f74127d680c3d7ce5d1c572 $
-
-#
-#  Configuration file for the "rediswho" module.
-#
-#  This module tracks the last set of login sessions for a user.
-#
-rediswho {
-	#  How many sessions to keep track of per user.
-	#  If there are more than this number, older sessions are deleted.
-	trim_count = 15
-
-	#  Expiry time in seconds.  Any sessions which have not received
-	#  an update in this time will be automatically expired.
-	expire_time = 86400
-
-	#
-	#  Each subsection contains insert / trim / expire queries.
-	#  The subsections are named after the contents of the
-	#  Acct-Status-Type attribute.  See dictionary.rfc2866 for names
-	#  of the various Acct-Status-Type values, or look at the output
-	#  of debug mode.
-	#
-	#  This module supports *any* Acct-Status-Type.  Just add a subsection
-	#  of the appropriate name, along with insert / trim / expire queries.
-	#
-	Start {
-		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
-		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
-		expire = "EXPIRE %{User-Name} ${..expire_time}"
-	}
-
-	Interim-Update {
-		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
-		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
-		expire = "EXPIRE %{User-Name} ${..expire_time}"
-	}
-
-	Stop {
-		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
-		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
-		expire = "EXPIRE %{User-Name} ${..expire_time}"
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/replicate b/src/test/setup/radius-config/freeradius/mods-available/replicate
deleted file mode 100644
index 6df4523..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/replicate
+++ /dev/null
@@ -1,40 +0,0 @@
-#  Replicate packet(s) to a home server.
-#
-#  This module will open a new socket for each packet, and "clone"
-#  the incoming packet to the destination realm (i.e. home server).
-#
-#  Use it by setting "Replicate-To-Realm = name" in the control list,
-#  just like Proxy-To-Realm.  The configurations for the two attributes
-#  are identical.  The realm must exist, the home_server_pool must exist,
-#  and the home_server must exist.
-#
-#  The only difference is that the "replicate" module sends requests
-#  and does not expect a reply.  Any reply is ignored.
-#
-#  Both Replicate-To-Realm and Proxy-To-Realm can be used at the same time.
-#
-#  To use this module, list "replicate" in the "authorize" or
-#  "accounting" section.  Then, ensure that Replicate-To-Realm is set.
-#  The contents of the "packet" attribute list will be sent to the
-#  home server.  The usual load-balancing, etc. features of the home
-#  server will be used.
-#
-#  "radmin" can be used to mark home servers alive/dead, in order to
-#  enable/disable replication to specific servers.
-#
-#  Packets can be replicated to multiple destinations.  Just set
-#  Replicate-To-Realm multiple times.  One packet will be sent for
-#  each of the Replicate-To-Realm attribute in the "control" list.
-#
-#  If no packets are sent, the module returns "noop".  If at least one
-#  packet is sent, the module returns "ok".  If an error occurs, the
-#  module returns "fail"
-#
-#  Note that replication does NOT change any of the packet statistics.
-#  If you use "radmin" to look at the statistics for a home server,
-#  the replicated packets will cause NO counters to increment.  This
-#  is not a bug, this is how replication works.
-#
-replicate {
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/rest b/src/test/setup/radius-config/freeradius/mods-available/rest
deleted file mode 100644
index 19b9de6..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/rest
+++ /dev/null
@@ -1,132 +0,0 @@
-rest {
-	#
-	#  This subsection configures the tls related items
-	#  that control how FreeRADIUS connects to a HTTPS
-	#  server.
-	#
-	tls {
-#		ca_file	= ${certdir}/cacert.pem
-#		ca_path	= ${certdir}
-
-#		certificate_file	= /path/to/radius.crt
-#		private_key_file	= /path/to/radius.key
-#		private_key_password	= "supersecret"
-#		random_file		= ${certdir}/random
-
-		#  Server certificate verification requirements.  Can be:
-		#    "no"  (don't even bother trying)
-		#    "yes" (verify the cert was issued by one of the
-		#	   trusted CAs)
-		#
-		#  The default is "yes"
-#		check_cert     = "yes"
-
-		#  Server certificate CN verification requirements.  Can be:
-		#    "no"  (don't even bother trying)
-		#    "yes" (verify the CN in the certificate matches the host
-		#	   in the URI)
-		#
-		#  The default is "yes"
-#		check_cert_cn  = "yes"
-	}
-
-	# rlm_rest will open a connection to the server specified in connect_uri
-	# to populate the connection cache, ready for the first request.
-	# The server will not start if the server specified is unreachable.
-	#
-	# If you wish to disable this pre-caching and reachability check,
-	# comment out the configuration item below.
-	connect_uri = "http://127.0.0.1/"
-
-	#
-	#  The following config items can be used in each of the sections.
-	#  The sections themselves reflect the sections in the server.
-	#  For example if you list rest in the authorize section of a virtual server,
-	#  the settings from the authorize section here will be used.
-	#
-	#  The following config items may be listed in any of the sections:
-	#    uri          - to send the request to.
-	#    method       - HTTP method to use, one of 'get', 'post', 'put', 'delete'.
-	#    body         - The format of the HTTP body sent to the remote server.
-	#                   May be 'none', 'post' or 'json', defaults to 'none'.
-	#    tls          - TLS settings for HTTPS.
-	#    auth         - HTTP auth method to use, one of 'none', 'srp', 'basic',
-	#                   'digest', 'digest-ie', 'gss-negotiate', 'ntlm',
-	#                   'ntlm-winbind', 'any', 'safe'. defaults to 'none'.
-	#    username     - User to authenticate as, will be expanded.
-	#    password     - Password to use for authentication, will be expanded.
-	#    require_auth - Require HTTP authentication.
-	#    timeout      - HTTP request timeout in seconds, defaults to 4.
-	#
-	authorize {
-		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=authorize"
-		method = "get"
-		tls = ${..tls}
-	}
-	authenticate {
-		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=authenticate"
-		method = "get"
-		tls = ${..tls}
-	}
-	accounting {
-		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=accounting"
-		method = "post"
-		tls = ${..tls}
-	}
-	session {
-		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=checksimul"
-		method = "post"
-		tls = ${..tls}
-	}
-	post-auth {
-		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=post-auth"
-		method = "post"
-		tls = ${..tls}
-	}
-
-	#
-	#  The connection pool is new for 3.0, and will be used in many
-	#  modules, for all kinds of connection-related activity.
-	#
-	pool {
-		# Number of connections to start
-		start = 5
-
-		# Minimum number of connections to keep open
-		min = 4
-
-		# Maximum number of connections
-		#
-		# If these connections are all in use and a new one
-		# is requested, the request will NOT get a connection.
-		max = 10
-
-		# Spare connections to be left idle
-		#
-		# NOTE: Idle connections WILL be closed if "idle_timeout"
-		# is set.
-		spare = 3
-
-		# Number of uses before the connection is closed
-		#
-		# 0 means "infinite"
-		uses = 0
-
-		# The lifetime (in seconds) of the connection
-		lifetime = 0
-
-		# idle timeout (in seconds).  A connection which is
-		# unused for this length of time will be closed.
-		idle_timeout = 60
-
-		# NOTE: All configuration settings are enforced.  If a
-		# connection is closed because of "idle_timeout",
-		# "uses", or "lifetime", then the total number of
-		# connections MAY fall below "min".  When that
-		# happens, it will open a new connection.  It will
-		# also log a WARNING message.
-		#
-		# The solution is to either lower the "min" connections,
-		# or increase lifetime/idle_timeout.
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/smbpasswd b/src/test/setup/radius-config/freeradius/mods-available/smbpasswd
deleted file mode 100644
index de400ee..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/smbpasswd
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- text -*-
-#
-#  $Id: d5ad2a06c767f07722dc9b9c4b13d00c26b5a280 $
-
-#  An example configuration for using /etc/smbpasswd.
-#
-#  See the "passwd" file for documentation on the configuration items
-#  for this module.
-#
-passwd smbpasswd {
-	filename = /etc/smbpasswd
-	format = "*User-Name::LM-Password:NT-Password:SMB-Account-CTRL-TEXT::"
-	hash_size = 100
-	ignore_nislike = no
-	allow_multiple_keys = no
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/smsotp b/src/test/setup/radius-config/freeradius/mods-available/smsotp
deleted file mode 100644
index 876931c..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/smsotp
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 3be32b85f56a84725fe1a6bf508e459dbe6c4e02 $
-
-#  SMS One-time Password system.
-#
-#  This module will extend FreeRadius with a socks interface to create and
-#  validate One-Time-Passwords. The program for that creates the socket
-#  and interacts with this module is not included here.
-#
-#  The module does not check the User-Password, this should be done with
-#  the "pap" module.  See the example below.
-#
-#  The module must be used in the "authorize" section to set
-#  Auth-Type properly.  The first time through, the module is called
-#  in the "authenticate" section to authenticate the user password, and
-#  to send the challenge.  The second time through, it authenticates
-#  the response to the challenge. e.g.:
-#
-#  authorize {
-#	...
-#	smsotp
-#	...
-#  }
-#
-#  authenticate {
-#	...
-#	Auth-Type smsotp {
-#		pap
-#		smsotp
-#	}
-#
-#	Auth-Type smsotp-reply {
-#		smsotp
-#	}
-#	...
-#  }
-#
-smsotp {
-	#  The location of the socket.
-	socket = "/var/run/smsotp_socket"
-
-	#  Defines the challenge message that will be send to the
-	#  NAS. Default is "Enter Mobile PIN" }
-	challenge_message = "Enter Mobile PIN:"
-
-	#  Defines the Auth-Type section that is run for the response to
-	#  the challenge. Default is "smsotp-reply".
-	challenge_type = "smsotp-reply"
-
-	#  Control how many sockets are used to talk to the SMSOTPd
-	#
-	pool {
-		# Number of connections to start
-		start = 5
-
-		# Minimum number of connections to keep open
-		min = 4
-
-		# Maximum number of connections
-		#
-		# If these connections are all in use and a new one
-		# is requested, the request will NOT get a connection.
-		max = 10
-
-		# Spare connections to be left idle
-		#
-		# NOTE: Idle connections WILL be closed if "idle_timeout"
-		# is set.
-		spare = 3
-
-		# Number of uses before the connection is closed
-		#
-		# 0 means "infinite"
-		uses = 0
-
-		# The lifetime (in seconds) of the connection
-		lifetime = 0
-
-		# idle timeout (in seconds).  A connection which is
-		# unused for this length of time will be closed.
-		idle_timeout = 60
-
-		# NOTE: All configuration settings are enforced.  If a
-		# connection is closed because of "idle_timeout",
-		# "uses", or "lifetime", then the total number of
-		# connections MAY fall below "min".  When that
-		# happens, it will open a new connection.  It will
-		# also log a WARNING message.
-		#
-		# The solution is to either lower the "min" connections,
-		# or increase lifetime/idle_timeout.
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/soh b/src/test/setup/radius-config/freeradius/mods-available/soh
deleted file mode 100644
index d125ce4..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/soh
+++ /dev/null
@@ -1,4 +0,0 @@
-# SoH module
-soh {
-	dhcp = yes
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sometimes b/src/test/setup/radius-config/freeradius/mods-available/sometimes
deleted file mode 100644
index 094426d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/sometimes
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 3a96622cc938f558b023e1110769a46861716a12 $
-
-#
-# The "sometimes" module is here for debugging purposes. Each instance
-# randomly returns the configured result, or "noop".
-#
-# It is based on the "always" module.
-sometimes {
-	rcode = fail
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sql b/src/test/setup/radius-config/freeradius/mods-available/sql
deleted file mode 100644
index eea2245..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/sql
+++ /dev/null
@@ -1,1003 +0,0 @@
-# -*- text -*-
-##
-## sql.conf -- SQL modules
-##
-##	$Id: e1431d634a28f20a0e5deaeedd66a161deb88eb7 $
-
-######################################################################
-#
-#  Configuration for the SQL module
-#
-#  The database schemas and queries are located in subdirectories:
-#
-#	sql/<DB>/main/schema.sql	Schema
-#	sql/<DB>/main/queries.conf	Authorisation and Accounting queries
-#
-#  Where "DB" is mysql, mssql, oracle, or postgresql.
-#
-#
-
-sql {
-	# The sub-module to use to execute queries. This should match
-	# the database you're attempting to connect to.
-	#
-	#    * rlm_sql_mysql
-	#    * rlm_sql_mssql
-	#    * rlm_sql_oracle
-	#    * rlm_sql_postgresql
-	#    * rlm_sql_sqlite
-	#    * rlm_sql_null (log queries to disk)
-	#
-	driver = "rlm_sql_sqlite"
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
- 
-	sqlite {
-		filename = "/opt/db/radius.sqlite3"
-	}
-
-#
-#	Several drivers accept specific options, to set them, a
-#	config section with the the name as the driver should be added
-#	to the sql instance.
-#
-#	Driver specific options are:
-#
-#	sqlite {
-#		# Path to the sqlite database
-#		filename = "/my/sqlite/database.db"
-#
-#		# If the file above does not exist and bootstrap is set
-#		# a new database file will be created, and the SQL statements
-#		# contained within the file will be executed.
-#		bootstrap = "/my/sqlite/schema.sql"
-# 	}
-#
-#	mysql {
-#		# If any of the below files are set tls encryption is enabled
-#		tls {
-#			ca_file = "/etc/ssl/certs/my_ca.crt"
-#			ca_path = "/etc/ssl/certs/"
-#			certificate_file = "/etc/ssl/certs/private/client.crt"
-#			private_key_file = "/etc/ssl/certs/private/client.key"
-#			cipher = "DHE-RSA-AES256-SHA:AES128-SHA"
-#		}
-#	}
-#
-
-	# The dialect of SQL you want to use, this should usually match
-	# the driver you selected above.
-	#
-	# If you're using rlm_sql_null, then it should be the type of
-	# database the logged queries are going to be executed against.
-	dialect = "sqlite"
-
-	# Connection info:
-	#
-#	server = "localhost"
-#	port = 3306
-#	login = "radius"
-#	password = "radpass"
-
-	# Database table configuration for everything except Oracle
-	radius_db = "radius"
-
-	# If you are using Oracle then use this instead
-#	radius_db = "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=localhost)(PORT=1521))(CONNECT_DATA=(SID=your_sid)))"
-
-	# If you're using postgresql this can also be used instead of the connection info parameters
-#	radius_db = "dbname=radius host=localhost user=radius password=raddpass"
-
-	# If you want both stop and start records logged to the
-	# same SQL table, leave this as is.  If you want them in
-	# different tables, put the start table in acct_table1
-	# and stop table in acct_table2
-	acct_table1 = "radacct"
-	acct_table2 = "radacct"
-
-	# Allow for storing data after authentication
-	postauth_table = "radpostauth"
-
-	# Tables containing 'check' items
-	authcheck_table = "radcheck"
-	groupcheck_table = "radgroupcheck"
-
-	# Tables containing 'reply' items
-	authreply_table = "radreply"
-	groupreply_table = "radgroupreply"
-
-	# Table to keep group info
-	usergroup_table = "radusergroup"
-
-	# If set to 'yes' (default) we read the group tables
-	# If set to 'no' the user MUST have Fall-Through = Yes in the radreply table
-	# read_groups = yes
-
-	# Remove stale session if checkrad does not see a double login
-	delete_stale_sessions = yes
-
-	# Write SQL queries to a logfile. This is potentially useful for tracing
-	# issues with authorization queries.
-#	logfile = ${logdir}/sqllog.sql
-
-	#  As of version 3.0, the "pool" section has replaced the
-	#  following configuration items:
-	#
-	#  num_sql_socks
-	#  connect_failure_retry_delay
-	#  lifetime
-	#  max_queries
-
-	#
-	#  The connection pool is new for 3.0, and will be used in many
-	#  modules, for all kinds of connection-related activity.
-	#
-	# When the server is not threaded, the connection pool
-	# limits are ignored, and only one connection is used.
-	#
-	pool {
-		# Number of connections to start
-		start = 5
-
-		# Minimum number of connections to keep open
-		min = 4
-
-		# Maximum number of connections
-		#
-		# If these connections are all in use and a new one
-		# is requested, the request will NOT get a connection.
-		#
-		# Setting 'max' to LESS than the number of threads means
-		# that some threads may starve, and you will see errors
-		# like "No connections available and at max connection limit"
-		#
-		# Setting 'max' to MORE than the number of threads means
-		# that there are more connections than necessary.
-		#
-		max = ${thread[pool].max_servers}
-
-		# Spare connections to be left idle
-		#
-		# NOTE: Idle connections WILL be closed if "idle_timeout"
-		# is set.
-		spare = 3
-
-		# Number of uses before the connection is closed
-		#
-		# 0 means "infinite"
-		uses = 0
-
-		# The lifetime (in seconds) of the connection
-		lifetime = 0
-
-		# idle timeout (in seconds).  A connection which is
-		# unused for this length of time will be closed.
-		idle_timeout = 60
-
-		# NOTE: All configuration settings are enforced.  If a
-		# connection is closed because of "idle_timeout",
-		# "uses", or "lifetime", then the total number of
-		# connections MAY fall below "min".  When that
-		# happens, it will open a new connection.  It will
-		# also log a WARNING message.
-		#
-		# The solution is to either lower the "min" connections,
-		# or increase lifetime/idle_timeout.
-	}
-
-	# Set to 'yes' to read radius clients from the database ('nas' table)
-	# Clients will ONLY be read on server startup.
-#	read_clients = yes
-
-	# Table to keep radius client info
-	client_table = "nas"
-
-	# Read database-specific queries
-	$INCLUDE ${modconfdir}/${.:name}/main/${dialect}/queries.conf
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sqlcounter b/src/test/setup/radius-config/freeradius/mods-available/sqlcounter
deleted file mode 100644
index 89d6d40..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/sqlcounter
+++ /dev/null
@@ -1,95 +0,0 @@
-#  Rather than maintaining separate (GDBM) databases of
-#  accounting info for each counter, this module uses the data
-#  stored in the raddacct table by the sql modules. This
-#  module NEVER does any database INSERTs or UPDATEs.  It is
-#  totally dependent on the SQL module to process Accounting
-#  packets.
-#
-#  The sql-module-instance' parameter holds the instance of the sql
-#  module to use when querying the SQL database. Normally it
-#  is just "sql".  If you define more and one SQL module
-#  instance (usually for failover situations), you can
-#  specify which module has access to the Accounting Data
-#  (radacct table).
-#
-#  The 'reset' parameter defines when the counters are all
-#  reset to zero.  It can be hourly, daily, weekly, monthly or
-#  never.  It can also be user defined. It should be of the
-#  form:
-#  	num[hdwm] where:
-#  	h: hours, d: days, w: weeks, m: months
-#  	If the letter is ommited days will be assumed. In example:
-#  	reset = 10h (reset every 10 hours)
-#  	reset = 12  (reset every 12 days)
-#
-#  The 'key' parameter specifies the unique identifier for the
-#  counter records (usually 'User-Name').
-#
-#  The 'query' parameter specifies the SQL query used to get
-#  the current Counter value from the database. There are 3
-#  parameters that can be used in the query:
-#		%b	unix time value of beginning of reset period
-#		%e	unix time value of end of reset period
-#
-#  The 'check_name' parameter is the name of the 'check'
-#  attribute to use to access the counter in the 'users' file
-#  or SQL radcheck or radcheckgroup tables.
-#
-#  DEFAULT  Max-Daily-Session > 3600, Auth-Type = Reject
-#      Reply-Message = "You've used up more than one hour today"
-#
-sqlcounter dailycounter {
-	sql_module_instance = sql
-	dialect = ${modules.sql.dialect}
-
-	counter_name = Daily-Session-Time
-	check_name = Max-Daily-Session
-	reply_name = Session-Timeout
-
-	key = User-Name
-	reset = daily
-
-	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
-}
-
-sqlcounter monthlycounter {
-	sql_module_instance = sql
-	dialect = ${modules.sql.dialect}
-
-	counter_name = Monthly-Session-Time
-	check_name = Max-Monthly-Session
-	reply_name = Session-Timeout
-	key = User-Name
-	reset = monthly
-
-	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
-}
-
-sqlcounter noresetcounter {
-	sql_module_instance = sql
-	dialect = ${modules.sql.dialect}
-
-	counter_name = Max-All-Session-Time
-	check_name = Max-All-Session
-	key = User-Name
-	reset = never
-
-	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
-}
-
-#
-#  Set an account to expire T seconds after first login.
-#  Requires the Expire-After attribute to be set, in seconds.
-#  You may need to edit raddb/dictionary to add the Expire-After
-#  attribute.
-sqlcounter expire_on_login {
-	sql_module_instance = sql
-	dialect = ${modules.sql.dialect}
-
-	counter_name = Expire-After-Initial-Login
-	check_name = Expire-After
-	key = User-Name
-	reset = never
-
-	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sqlippool b/src/test/setup/radius-config/freeradius/mods-available/sqlippool
deleted file mode 100644
index 269c072..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/sqlippool
+++ /dev/null
@@ -1,65 +0,0 @@
-#  Configuration for the SQL based IP Pool module (rlm_sqlippool)
-#
-#  The database schemas are available at:
-#
-#       raddb/sql/ippool/<DB>/schema.sql
-#
-#  $Id: 26960222182b4656ed895e365a4ca4659d87e2a9 $
-
-sqlippool {
-	# SQL instance to use (from sql.conf)
-	#
-	#  If you have multiple sql instances, such as "sql sql1 {...}",
-	#  use the *instance* name here: sql1.
-	sql_module_instance = "sql"
-
-	#  This is duplicative of info available in the SQL module, but
-	#  we have to list it here as we do not yet support nested
-	#  reference expansions.
-	dialect = "mysql"
-
-	# SQL table to use for ippool range and lease info
-	ippool_table = "radippool"
-
-	# IP lease duration. (Leases expire even if Acct Stop packet is lost)
-	lease_duration = 3600
-
-	# Attribute which should be considered unique per NAS
-	#
-	#  Using NAS-Port gives behaviour similar to rlm_ippool. (And ACS)
-	#  Using Calling-Station-Id works for NAS that send fixed NAS-Port
-	#  ONLY change this if you know what you are doing!
-	pool_key = "%{NAS-Port}"
-	# pool_key = "%{Calling-Station-Id}"
-
-	################################################################
-	#
-	#  WARNING: MySQL (MyISAM) has certain limitations that means it can
-	#           hand out the same IP address to 2 different users.
-	#
-	#           We suggest using an SQL DB with proper transaction
-	#           support, such as PostgreSQL, or using MySQL
-	#	     with InnoDB.
-	#
-	################################################################
-
-	#  These messages are added to the "control" items, as
-	#  Module-Success-Message.  They are not logged anywhere else,
-	#  unlike previous versions.  If you want to have them logged
-	#  to a file, see the "linelog" module, and create an entry
-	#  which writes Module-Success-Message message.
-	#
-	messages {
-		exists = "Existing IP: %{reply:Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-		success = "Allocated IP: %{reply:Framed-IP-Address} from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-		clear = "Released IP %{Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} user %{User-Name})"
-
-		failed = "IP Allocation FAILED from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-
-		nopool = "No Pool-Name defined (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
-	}
-
-	$INCLUDE ${modconfdir}/sql/ippool/${dialect}/queries.conf
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sradutmp b/src/test/setup/radius-config/freeradius/mods-available/sradutmp
deleted file mode 100644
index 8e28704..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/sradutmp
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 3a2a0e502e76ec00d4ec17e70132448e1547da46 $
-
-# "Safe" radutmp - does not contain caller ID, so it can be
-# world-readable, and radwho can work for normal users, without
-# exposing any information that isn't already exposed by who(1).
-#
-# This is another 'instance' of the radutmp module, but it is given
-# then name "sradutmp" to identify it later in the "accounting"
-# section.
-radutmp sradutmp {
-	filename = ${logdir}/sradutmp
-	permissions = 0644
-	caller_id = "no"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unbound b/src/test/setup/radius-config/freeradius/mods-available/unbound
deleted file mode 100644
index 9fd9b1f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/unbound
+++ /dev/null
@@ -1,4 +0,0 @@
-unbound dns {
-	# filename = "${raddbdir}/mods-config/unbound/default.conf"
-	# timeout = 3000
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unix b/src/test/setup/radius-config/freeradius/mods-available/unix
deleted file mode 100644
index a5798d5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/unix
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 5165139aaf39d533581161871542b48a6e3e8c42 $
-
-# Unix /etc/passwd style authentication
-#
-#  This module calls the system functions to get the "known good"
-#  password.  This password is usually in the "crypt" form, and is
-#  incompatible with CHAP, MS-CHAP, PEAP, etc.
-#
-#  If passwords are in /etc/shadow, you will need to set the "group"
-#  configuration in radiusd.conf.  Look for "shadow", and follow the
-#  instructions there.
-#
-unix {
-	#
-	#  The location of the "wtmp" file.
-	#  The only use for 'radlast'.  If you don't use
-	#  'radlast', then you can comment out this item.
-	#
-	#  Note that the radwtmp file may get large!  You should
-	#  rotate it (cp /dev/null radwtmp), or just not use it.
-	#
-	radwtmp = ${logdir}/radwtmp
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unpack b/src/test/setup/radius-config/freeradius/mods-available/unpack
deleted file mode 100644
index 6e42ad1..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/unpack
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- text -*-
-#
-#  $Id: 2a1e130d315daa247167372773c1994e3200f332 $
-
-#
-#  This module is useful only for 'xlat'.  To use it,
-#  add it to the raddb/mods-enabled/ directory.  Then,
-#  use it on the right-hand side of a variable assignment.
-#
-#  ... = "%{unpack:data 1 integer}"
-#
-#  The arguments are three fields:
-#
-#	data
-#		Either &Attribute-Name
-#		the name of the attribute to unpack.
-#		MUST be a "string" or "octets" type.
-#
-#		or 0xabcdef
-#		e.g. hex data.
-#
-#	1
-#		The offset into the string from which
-#		it starts unpacking.  The offset starts
-#		at zero, for the first attribute.
-#
-#	integer
-#		the data type to unpack at that offset.
-#		e.g. integer, ipaddr, byte, short, etc.
-#
-#  e.g. if we have Class = 0x00000001020304, then
-#
-#	%{unpack:&Class 4 short}
-#
-#  will unpack octets 4 and 5 as a "short", which has
-#  value 0x0304.
-#
-#  This module is used when vendors put multiple fields
-#  into one attribute of type "octets".
-#
-unpack {
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/utf8 b/src/test/setup/radius-config/freeradius/mods-available/utf8
deleted file mode 100644
index 00812fa..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/utf8
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-#  Enforces UTF-8 on strings coming in from the NAS.
-#
-#  An attribute of type "string" containing UTF-8 makes
-#  the module return NOOP.
-#
-#  An attribute of type "string" containing non-UTF-8 data
-#  makes the module return FAIL.
-#
-#  This module takes no configuration.
-#
-utf8 {
-
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/wimax b/src/test/setup/radius-config/freeradius/mods-available/wimax
deleted file mode 100644
index c2aa42f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/wimax
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-#	The WiMAX module currently takes no configuration.
-#
-#	It should be listed in the "authorize" and "preacct" sections.
-#	This enables the module to fix the horrible binary version
-#	of Calling-Station-Id to the normal format, as specified in
-#	RFC 3580, Section 3.21.
-#
-#	In order to calculate the various WiMAX keys, the module should
-#	be listed in the "post-auth" section.  If EAP authentication
-#	has been used, AND the EAP method derives MSK and EMSK, then
-#	the various WiMAX keys can be calculated.
-#
-#	Some useful things to remember:
-#
-#	WiMAX-MSK = EAP MSK, but is 64 octets.
-#
-#	MIP-RK-1 = HMAC-SHA256(ESMK, "miprk@wimaxforum.org" | 0x00020001)
-#	MIP-RK-2 = HMAC-SHA256(ESMK, MIP-RK-1 | "miprk@wimaxforum.org" | 0x00020002)
-#	MIP-RK = MIP-RK-1 | MIP-RK-2
-#
-#	MIP-SPI = first 4 octets of HMAC-SHA256(MIP-RK, "SPI CMIP PMIP")
-#		plus some magic... you've got to track *all* MIP-SPI's
-#		on your system!
-#
-#	SPI-CMIP4 = MIP-SPI
-#	SPI-PMIP4 = MIP-SPI + 1
-#	SPI-CMIP6 = MIP-SPI + 2
-#
-#	MN-NAI is the Mobile node NAI.  You have to create it, and put
-#	it into the request or reply as something like:
-#
-#		WiMAX-MN-NAI = "%{User-Name}"
-#
-#	You will also have to have the appropriate IP address (v4 or v6)
-#	in order to calculate the keys below.
-#
-#	Lifetimes are derived from Session-Timeout.  It needs to be set
-#	to some useful number.
-#
-#	The hash function below H() is HMAC-SHA1.
-#
-#
-#	MN-HA-CMIP4 = H(MIP-RK, "CMIP4 MN HA" | HA-IPv4 | MN-NAI)
-#
-#		Where HA-IPv4 is	WiMAX-hHA-IP-MIP4
-#		or maybe		WiMAX-vHA-IP-MIP4
-#
-#		Which goes into		WiMAX-MN-hHA-MIP4-Key
-#		or maybe		WiMAX-RRQ-MN-HA-Key
-#		or maybe even		WiMAX-vHA-MIP4-Key
-#
-#	The corresponding SPI is SPI-CMIP4, which is MIP-SPI,
-#
-#		which goes into		WiMAX-MN-hHA-MIP4-SPI
-#		or maybe		WiMAX-RRQ-MN-HA-SPI
-#		or even			WiMAX-MN-vHA-MIP4-SPI
-#
-#	MN-HA-PMIP4 = H(MIP-RK, "PMIP4 MN HA" | HA-IPv4 | MN-NAI)
-#	MN-HA-CMIP6 = H(MIP-RK, "CMIP6 MN HA" | HA-IPv6 | MN-NAI)
-#
-#		both with similar comments to above for MN-HA-CMIP4.
-#
-#	In order to tell which one to use (CMIP4, PMIP4, or CMIP6),
-#	you have to set WiMAX-IP-Technology in the reply to one of
-#	the appropriate values.
-#
-#
-#	FA-RK = H(MIP-RK, "FA-RK")
-#
-#	MN-FA = H(FA-RK, "MN FA" | FA-IP | MN-NAI)
-#
-#		Where does the FA-IP come from?  No idea...
-#
-#
-#	The next two keys (HA-RK and FA-HA) are not generated
-#	for every authentication request, but only on demand.
-#
-#	HA-RK = 160-bit random number assigned by the AAA server
-#		to a specific HA.
-#
-#	FA-HA = H(HA-RK, "FA-HA" | HA-IPv4 | FA-CoAv4 | SPI)
-#
-#		where HA-IPv4 is as above.
-#		and FA-CoAv4 address of the FA as seen by the HA
-#		and SPI is the relevant SPI for the HA-RK.
-#
-#	DHCP-RK = 160-bit random number assigned by the AAA server
-#		  to a specific DHCP server.  vDHCP-RK is the same
-#		  thing.
-#
-wimax {
-	#
-	#  Some WiMAX equipment requires that the MS-MPPE-*-Key
-	#  attributes are sent in the Access-Accept, in addition to
-	#  the WiMAX-MSK attribute.
-	#
-	#  Other WiMAX equipment request that the MS-MPPE-*-Key
-	#  attributes are NOT sent in the Access-Accept.
-	#
-	#  By default, the EAP modules sends MS-MPPE-*-Key attributes.
-	#  The default virtual server (raddb/sites-available/default)
-	#  contains examples of adding the WiMAX-MSK.
-	#
-	#  This configuration option makes the WiMAX module delete
-	#  the MS-MPPE-*-Key attributes.  The default is to leave
-	#  them in place.
-	#
-	#  If the keys are deleted (by setting this to "yes"), then
-	#  the WiMAX-MSK attribute is automatically added to the reply.
-	delete_mppe_keys = no
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/yubikey b/src/test/setup/radius-config/freeradius/mods-available/yubikey
deleted file mode 100644
index d21c136..0000000
--- a/src/test/setup/radius-config/freeradius/mods-available/yubikey
+++ /dev/null
@@ -1,141 +0,0 @@
-#
-#  This module decrypts and validates Yubikey static and dynamic
-#  OTP tokens.
-#
-yubikey {
-	#
-	#  The length (number of ASCII bytes) of the Public-ID portion
-	#  of the OTP string.
-	#
-	#  Yubikey defaults to a 6 byte ID (2 * 6 = 12)
-#	id_length = 12
-
-	#
-	#  If true, the authorize method of rlm_yubikey will attempt to split the
-	#  value of User-Password, into the user's password, and the OTP token.
-	#
-	#  If enabled and successful, the value of User-Password will be truncated
-	#  and request:Yubikey-OTP will be added.
-	#
-#	split = yes
-
-	#
-	#  Decrypt mode - Tokens will be decrypted and processed locally
-	#
-	#  The module itself does not provide persistent storage as this
-	#  would be duplicative of functionality already in the server.
-	#
-	#  Yubikey authentication needs two control attributes
-	#  retrieved from persistent storage:
-	#    * Yubikey-Key     - The AES key used to decrypt the OTP data.
-	#                        The Yubikey-Public-Id and/or User-Name
-	#                        attributes may be used to retrieve the key.
-	#    * Yubikey-Counter - This is compared with the counter in the OTP
-	#                        data and used to prevent replay attacks.
-	#                        This attribute will also be available in
-	#                        the request list after successful
-	#                        decryption.
-	#
-	#  Yubikey-Counter isn't strictly required, but the server will
-	#  generate warnings if it's not present when yubikey.authenticate
-	#  is called.
-	#
-	#  These attributes are available after authorization:
-	#    * Yubikey-Public-ID  - The public portion of the OTP string
-	#
-	#  These attributes are available after authentication (if successful):
-	#    * Yubikey-Private-ID - The encrypted ID included in OTP data,
-	#                           must be verified if tokens share keys.
-	#    * Yubikey-Counter    - The last counter value (should be recorded).
-	#    * Yubikey-Timestamp  - Token's internal clock (mainly useful for debugging).
-	#    * Yubikey-Random     - Randomly generated value from the token.
-	#
-	decrypt = no
-
-	#
-	#  Validation mode - Tokens will be validated against a Yubicloud server
-	#
-	validate = no
-
-	#
-	#  Settings for validation mode.
-	#
-	validation {
-		#
-		#  URL of validation server, multiple URL config items may be used
-		#  to list multiple servers.
-		#
-		# - %d is a placeholder for public ID of the token
-		# - %s is a placeholder for the token string itself
-		#
-		#  If no URLs are listed, will default to the default URLs in the
-		#  ykclient library, which point to the yubico validation servers.
-		servers {
-#			uri = 'http://api.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
-#			uri = 'http://api2.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
-		}
-
-		#
-		#  API Client ID
-		#
-		#  Must be set to your client id for the validation server.
-		#
-#		client_id = 00000
-
-		#
-		#  API Secret key (Base64 encoded)
-		#
-		#  Must be set to your API key for the validation server.
-		#
-#		api_key = '000000000000000000000000'
-
-		#
-		#  Connection pool parameters
-		#
-		pool {
-			# Number of connections to start
-			start = 5
-
-			# Minimum number of connections to keep open
-			min = 4
-
-			# Maximum number of connections
-			#
-			# If these connections are all in use and a new one
-			# is requested, the request will NOT get a connection.
-			max = 10
-
-			# Spare connections to be left idle
-			#
-			# NOTE: Idle connections WILL be closed if "idle_timeout"
-			# is set.
-			spare = 3
-
-			# Number of uses before the connection is closed
-			#
-			# 0 means "infinite"
-			uses = 0
-
-			# The lifetime (in seconds) of the connection
-			lifetime = 0
-
-			# idle timeout (in seconds).  A connection which is
-			# unused for this length of time will be closed.
-			idle_timeout = 60
-
-			# Cycle over all connections in a pool instead of concentrating
-			# connection use on a few connections.
-			spread = yes
-
-			# NOTE: All configuration settings are enforced.  If a
-			# connection is closed because of "idle_timeout",
-			# "uses", or "lifetime", then the total number of
-			# connections MAY fall below "min".  When that
-			# happens, it will open a new connection.  It will
-			# also log a WARNING message.
-			#
-			# The solution is to either lower the "min" connections,
-			# or increase lifetime/idle_timeout.
-		}
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/README.rst b/src/test/setup/radius-config/freeradius/mods-config/README.rst
deleted file mode 100644
index abb4c8d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/README.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-The mods-config Directory
-=========================
-
-This directory contains module-specific configuration files.  These
-files are in a format different from the one used by the main
-`radiusd.conf` files.  Earlier versions of the server had many
-module-specific files in the main `raddb` directory.  The directory
-contained many files, and it was not clear which files did what.
-
-For Version 3 of FreeRADIUS, we have moved to a consistent naming
-scheme.  Each module-specific configuration file is placed in this
-directory, in a subdirectory named for the module.  Where necessary,
-files in the subdirectory have been named for the processing section
-where they are used.
-
-For example, the `users` file is now located in
-`mods-config/files/authorize`.  That filename tells us three things:
-
-1. The file is used in the `authorize` section.
-2. The file is used by the `files` module.
-3. It is a "module configuration" file, which is a specific format.
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge
deleted file mode 100644
index 528670c..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-#	Configuration file for the rlm_attr_filter module.
-#	Please see rlm_attr_filter(5) manpage for more information.
-#
-#	$Id: 12ed619cf16f7322221ef2dfaf28f9c36c616e3c $
-#
-#	This configuration file is used to remove almost all of the
-#	attributes From an Access-Challenge message.  The RFCs say
-#	that an Access-Challenge packet can contain only a few
-#	attributes.  We enforce that here.
-#
-DEFAULT
-	EAP-Message =* ANY,
-	State =* ANY,
-	Message-Authenticator =* ANY,
-	Reply-Message =* ANY,
-	Proxy-State =* ANY,
-	Session-Timeout =* ANY,
-	Idle-Timeout =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject
deleted file mode 100644
index e5a122b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-#	Configuration file for the rlm_attr_filter module.
-#	Please see rlm_attr_filter(5) manpage for more information.
-#
-#	$Id: 251f79c9b50d317aec0b31d2c4ff2208ef596509 $
-#
-#	This configuration file is used to remove almost all of the attributes
-#	From an Access-Reject message.  The RFCs say that an Access-Reject
-#	packet can contain only a few attributes.  We enforce that here.
-#
-DEFAULT
-	EAP-Message =* ANY,
-	State =* ANY,
-	Message-Authenticator =* ANY,
-	Reply-Message =* ANY,
-	MS-CHAP-Error =* ANY,
-	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response
deleted file mode 100644
index eb72eec..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-#	Configuration file for the rlm_attr_filter module.
-#	Please see rlm_attr_filter(5) manpage for more information.
-#
-#	$Id: 3746ce4da3d58fcdd0b777a93e599045353c27ac $
-#
-#	This configuration file is used to remove almost all of the attributes
-#	From an Accounting-Response message.  The RFC's say that an
-#	Accounting-Response packet can contain only a few attributes.
-#	We enforce that here.
-#
-DEFAULT
-	Vendor-Specific =* ANY,
-	Message-Authenticator =* ANY,
-	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy
deleted file mode 100644
index 555ee48..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-#	Configuration file for the rlm_attr_filter module.
-#	Please see rlm_attr_filter(5) manpage for more information.
-#
-#	$Id: 21a3af9c7ad97563b372d445bee2b37d564448fe $
-#
-#	This file contains security and configuration information
-#	for each realm. The first field is the realm name and
-#	can be up to 253 characters in length. This is followed (on
-#	the next line) with the list of filter rules to be used to
-#	decide what attributes and/or values we allow proxy servers
-#	to pass to the NAS for this realm.
-#
-#	When a proxy-reply packet is received from a home server,
-#	these attributes and values are tested. Only the first match
-#	is used unless the "Fall-Through" variable is set to "Yes".
-#	In that case the rules defined in the DEFAULT case are
-#	processed as well.
-#
-#	A special realm named "DEFAULT" matches on all realm names.
-#	You can have only one DEFAULT entry. All entries are processed
-#	in the order they appear in this file. The first entry that
-#	matches the login-request will stop processing unless you use
-#	the Fall-Through variable.
-#
-#	Indented (with the tab character) lines following the first
-#	line indicate the filter rules.
-#
-#	You can include another `attrs' file with `$INCLUDE attrs.other'
-#
-
-#
-# This is a complete entry for realm "fisp". Note that there is no
-# Fall-Through entry so that no DEFAULT entry will be used, and the
-# server will NOT allow any other a/v pairs other than the ones
-# listed here.
-#
-# These rules allow:
-#     o  Only Framed-User Service-Types ( no telnet, rlogin, tcp-clear )
-#     o  PPP sessions ( no SLIP, CSLIP, etc. )
-#     o  dynamic ip assignment ( can't assign a static ip )
-#     o  an idle timeout value set to 600 seconds (10 min) or less
-#     o  a max session time set to 28800 seconds (8 hours) or less
-#
-#fisp
-#	Service-Type == Framed-User,
-#	Framed-Protocol == PPP,
-#	Framed-IP-Address == 255.255.255.254,
-#	Idle-Timeout <= 600,
-#	Session-Timeout <= 28800
-
-#
-# This is a complete entry for realm "tisp". Note that there is no
-# Fall-Through entry so that no DEFAULT entry will be used, and the
-# server will NOT allow any other a/v pairs other than the ones
-# listed here.
-#
-# These rules allow:
-#       o Only Login-User Service-Type ( no framed/ppp sessions )
-#       o Telnet sessions only ( no rlogin, tcp-clear )
-#       o Login hosts of either 192.0.2.1 or 192.0.2.2
-#
-#tisp
-#	Service-Type == Login-User,
-#	Login-Service == Telnet,
-#	Login-TCP-Port == 23,
-#	Login-IP-Host == 192.0.2.1,
-#	Login-IP-Host == 192.0.2.2
-
-#
-# The following example can be used for a home server which is only
-# allowed to supply a Reply-Message, a Session-Timeout attribute of
-# maximum 86400, a Idle-Timeout attribute of maximum 600 and a
-# Acct-Interim-Interval attribute between 300 and 3600.
-# All other attributes sent back will be filtered out.
-#
-#strictrealm
-#	Reply-Message =* ANY,
-#	Session-Timeout <= 86400,
-#	Idle-Timeout <= 600,
-#	Acct-Interim-Interval >= 300,
-#	Acct-Interim-Interval <= 3600
-
-#
-# This is a complete entry for realm "spamrealm". Fall-Through is used,
-# so that the DEFAULT filter rules are used in addition to these.
-#
-# These rules allow:
-#       o Force the application of Filter-ID attribute to be returned
-#         in the proxy reply, whether the proxy sent it or not.
-#       o The standard DEFAULT rules as defined below
-#
-#spamrealm
-#	Framed-Filter-Id := "nosmtp.in",
-#	Fall-Through = Yes
-
-#
-# The rest of this file contains the DEFAULT entry.
-# DEFAULT matches with all realm names. (except if the realm previously
-# matched an entry with no Fall-Through)
-#
-
-DEFAULT
-	Service-Type == Framed-User,
-	Service-Type == Login-User,
-	Login-Service == Telnet,
-	Login-Service == Rlogin,
-	Login-Service == TCP-Clear,
-	Login-TCP-Port <= 65536,
-	Framed-IP-Address == 255.255.255.254,
-	Framed-IP-Netmask == 255.255.255.255,
-	Framed-Protocol == PPP,
-	Framed-Protocol == SLIP,
-	Framed-Compression == Van-Jacobson-TCP-IP,
-	Framed-MTU >= 576,
-	Framed-Filter-ID =* ANY,
-	Reply-Message =* ANY,
-	Proxy-State =* ANY,
-	EAP-Message =* ANY,
-	Message-Authenticator =* ANY,
-	MS-MPPE-Recv-Key =* ANY,
-	MS-MPPE-Send-Key =* ANY,
-	MS-CHAP-MPPE-Keys =* ANY,
-	State =* ANY,
-	Session-Timeout <= 28800,
-	Idle-Timeout <= 600,
-	Calling-Station-Id =* ANY,
-	Operator-Name =* ANY,
-	Port-Limit <= 2
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy
deleted file mode 100644
index 786a341..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-#	Configuration file for the rlm_attr_filter module.
-#	Please see rlm_attr_filter(5) manpage for more information.
-#
-#	$Id: 8c601cf205f9d85b75c1ec7fc8e816e7341a5ba4 $
-#
-#	This file contains security and configuration information
-#	for each realm. It can be used be an rlm_attr_filter module
-#	instance to filter attributes before sending packets to the
-#	home server of a realm.
-#
-#	When a packet is sent to a home server, these attributes
-#	and values are tested. Only the first match is used unless
-#	the "Fall-Through" variable is set to "Yes". In that case
-#	the rules defined in the DEFAULT case are processed as well.
-#
-#	A special realm named "DEFAULT" matches on all realm names.
-#	You can have only one DEFAULT entry. All entries are processed
-#	in the order they appear in this file. The first entry that
-#	matches the login-request will stop processing unless you use
-#	the Fall-Through variable.
-#
-#	The first line indicates the realm to which the rules apply.
-#	Indented (with the tab character) lines following the first
-#	line indicate the filter rules.
-#
-
-# This is a complete entry for 'nochap' realm. It allows to send very
-# basic attributes to the home server. Note that there is no Fall-Through
-# entry so that no DEFAULT entry will be used. Only the listed attributes
-# will be sent in the packet, all other attributes will be filtered out.
-#
-#nochap
-#	User-Name =* ANY,
-#	User-Password =* ANY,
-#	NAS-Ip-Address =* ANY,
-#	NAS-Identifier =* ANY
-
-# The entry for the 'brokenas' realm removes the attribute NAS-Port-Type
-# if its value is different from 'Ethernet'. Then the default rules are
-# applied.
-#
-#brokenas
-#	NAS-Port-Type == Ethernet
-#	Fall-Through = Yes
-
-# The rest of this file contains the DEFAULT entry.
-# DEFAULT matches with all realm names.
-
-DEFAULT
-	User-Name =* ANY,
-	User-Password =* ANY,
-	CHAP-Password =* ANY,
-	CHAP-Challenge =* ANY,
-	MS-CHAP-Challenge =* ANY,
-	MS-CHAP-Response =* ANY,
-	EAP-Message =* ANY,
-	Message-Authenticator =* ANY,
-	State =* ANY,
-	NAS-IP-Address =* ANY,
-	NAS-Identifier =* ANY,
-	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/accounting b/src/test/setup/radius-config/freeradius/mods-config/files/accounting
deleted file mode 100644
index 552b274..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/files/accounting
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#	$Id: 322d33a01f26e3990ba19954b7847e6993ae389b $
-#
-#	This is like the 'users' file, but it is processed only for
-#	accounting packets.
-#
-
-#  Select between different accounting methods based for example on the
-#  Realm, the Huntgroup-Name or any combinaison of the attribute/value
-#  pairs contained in an accounting packet.
-#
-#DEFAULT Realm == "foo.net", Acct-Type := sql_log.foo
-#
-#DEFAULT Huntgroup-Name == "wifi", Acct-Type := sql_log.wifi
-#
-#DEFAULT Client-IP-Address == 10.0.0.1, Acct-Type := sql_log.other
-#
-#DEFAULT Acct-Status-Type == Start, Acct-Type := sql_log.start
-
-#  Replace the User-Name with the Stripped-User-Name, if it exists.
-#
-#DEFAULT
-#	User-Name := "%{%{Stripped-User-Name}:-%{User-Name}}"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/authorize b/src/test/setup/radius-config/freeradius/mods-config/files/authorize
deleted file mode 100644
index 3528563..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/files/authorize
+++ /dev/null
@@ -1,218 +0,0 @@
-#
-# 	Configuration file for the rlm_files module.
-# 	Please see rlm_files(5) manpage for more information.
-#
-# 	This file contains authentication security and configuration
-#	information for each user.  Accounting requests are NOT processed
-#	through this file.  Instead, see 'accounting', in this directory.
-#
-#	The first field is the user's name and can be up to
-#	253 characters in length.  This is followed (on the same line) with
-#	the list of authentication requirements for that user.  This can
-#	include password, comm server name, comm server port number, protocol
-#	type (perhaps set by the "hints" file), and huntgroup name (set by
-#	the "huntgroups" file).
-#
-#	If you are not sure why a particular reply is being sent by the
-#	server, then run the server in debugging mode (radiusd -X), and
-#	you will see which entries in this file are matched.
-#
-#	When an authentication request is received from the comm server,
-#	these values are tested. Only the first match is used unless the
-#	"Fall-Through" variable is set to "Yes".
-#
-#	A special user named "DEFAULT" matches on all usernames.
-#	You can have several DEFAULT entries. All entries are processed
-#	in the order they appear in this file. The first entry that
-#	matches the login-request will stop processing unless you use
-#	the Fall-Through variable.
-#
-#	Indented (with the tab character) lines following the first
-#	line indicate the configuration values to be passed back to
-#	the comm server to allow the initiation of a user session.
-#	This can include things like the PPP configuration values
-#	or the host to log the user onto.
-#
-#	You can include another `users' file with `$INCLUDE users.other'
-
-#
-#	For a list of RADIUS attributes, and links to their definitions,
-#	see: http://www.freeradius.org/rfc/attributes.html
-#
-#	Entries below this point are examples included in the server for
-#	educational purposes. They may be deleted from the deployed
-#	configuration without impacting the operation of the server.
-#
-
-#
-# Deny access for a specific user.  Note that this entry MUST
-# be before any other 'Auth-Type' attribute which results in the user
-# being authenticated.
-#
-# Note that there is NO 'Fall-Through' attribute, so the user will not
-# be given any additional resources.
-#
-#lameuser	Auth-Type := Reject
-#		Reply-Message = "Your account has been disabled."
-
-#
-# Deny access for a group of users.
-#
-# Note that there is NO 'Fall-Through' attribute, so the user will not
-# be given any additional resources.
-#
-#DEFAULT	Group == "disabled", Auth-Type := Reject
-#		Reply-Message = "Your account has been disabled."
-#
-
-#
-# This is a complete entry for "steve". Note that there is no Fall-Through
-# entry so that no DEFAULT entry will be used, and the user will NOT
-# get any attributes in addition to the ones listed here.
-#
-#steve	Cleartext-Password := "testing"
-#	Service-Type = Framed-User,
-#	Framed-Protocol = PPP,
-#	Framed-IP-Address = 172.16.3.33,
-#	Framed-IP-Netmask = 255.255.255.0,
-#	Framed-Routing = Broadcast-Listen,
-#	Framed-Filter-Id = "std.ppp",
-#	Framed-MTU = 1500,
-#	Framed-Compression = Van-Jacobsen-TCP-IP
-
-#
-# The canonical testing user which is in most of the
-# examples.
-#
-#bob	Cleartext-Password := "hello"
-#	Reply-Message := "Hello, %{User-Name}"
-#
-
-test	Cleartext-Password := "test"
-	Reply-Message := "Hello, %{User-Name}"
-
-raduser	Cleartext-Password := "radpass"
-	Reply-Message := "Hello, %{User-Name}"
-
-#
-# This is an entry for a user with a space in their name.
-# Note the double quotes surrounding the name.  If you have
-# users with spaces in their names, you must also change
-# the "filter_username" policy to allow spaces.
-#
-# See raddb/policy.d/filter, filter_username {} section.
-#
-#"John Doe"	Cleartext-Password := "hello"
-#		Reply-Message = "Hello, %{User-Name}"
-
-#
-# Dial user back and telnet to the default host for that port
-#
-#Deg	Cleartext-Password := "ge55ged"
-#	Service-Type = Callback-Login-User,
-#	Login-IP-Host = 0.0.0.0,
-#	Callback-Number = "9,5551212",
-#	Login-Service = Telnet,
-#	Login-TCP-Port = Telnet
-
-#
-# Another complete entry. After the user "dialbk" has logged in, the
-# connection will be broken and the user will be dialed back after which
-# he will get a connection to the host "timeshare1".
-#
-#dialbk	Cleartext-Password := "callme"
-#	Service-Type = Callback-Login-User,
-#	Login-IP-Host = timeshare1,
-#	Login-Service = PortMaster,
-#	Callback-Number = "9,1-800-555-1212"
-
-#
-# user "swilson" will only get a static IP number if he logs in with
-# a framed protocol on a terminal server in Alphen (see the huntgroups file).
-#
-# Note that by setting "Fall-Through", other attributes will be added from
-# the following DEFAULT entries
-#
-#swilson	Service-Type == Framed-User, Huntgroup-Name == "alphen"
-#		Framed-IP-Address = 192.0.2.65,
-#		Fall-Through = Yes
-
-#
-# If the user logs in as 'username.shell', then authenticate them
-# using the default method, give them shell access, and stop processing
-# the rest of the file.
-#
-#DEFAULT	Suffix == ".shell"
-#		Service-Type = Login-User,
-#		Login-Service = Telnet,
-#		Login-IP-Host = your.shell.machine
-
-
-#
-# The rest of this file contains the several DEFAULT entries.
-# DEFAULT entries match with all login names.
-# Note that DEFAULT entries can also Fall-Through (see first entry).
-# A name-value pair from a DEFAULT entry will _NEVER_ override
-# an already existing name-value pair.
-#
-
-#
-# Set up different IP address pools for the terminal servers.
-# Note that the "+" behind the IP address means that this is the "base"
-# IP address. The Port-Id (S0, S1 etc) will be added to it.
-#
-#DEFAULT	Service-Type == Framed-User, Huntgroup-Name == "alphen"
-#		Framed-IP-Address = 192.0.2.32+,
-#		Fall-Through = Yes
-
-#DEFAULT	Service-Type == Framed-User, Huntgroup-Name == "delft"
-#		Framed-IP-Address = 198.51.100.32+,
-#		Fall-Through = Yes
-
-#
-# Sample defaults for all framed connections.
-#
-#DEFAULT	Service-Type == Framed-User
-#	Framed-IP-Address = 255.255.255.254,
-#	Framed-MTU = 576,
-#	Service-Type = Framed-User,
-#	Fall-Through = Yes
-
-#
-# Default for PPP: dynamic IP address, PPP mode, VJ-compression.
-# NOTE: we do not use Hint = "PPP", since PPP might also be auto-detected
-#	by the terminal server in which case there may not be a "P" suffix.
-#	The terminal server sends "Framed-Protocol = PPP" for auto PPP.
-#
-DEFAULT	Framed-Protocol == PPP
-	Framed-Protocol = PPP,
-	Framed-Compression = Van-Jacobson-TCP-IP
-
-#
-# Default for CSLIP: dynamic IP address, SLIP mode, VJ-compression.
-#
-DEFAULT	Hint == "CSLIP"
-	Framed-Protocol = SLIP,
-	Framed-Compression = Van-Jacobson-TCP-IP
-
-#
-# Default for SLIP: dynamic IP address, SLIP mode.
-#
-DEFAULT	Hint == "SLIP"
-	Framed-Protocol = SLIP
-
-#
-# Last default: rlogin to our main server.
-#
-#DEFAULT
-#	Service-Type = Login-User,
-#	Login-Service = Rlogin,
-#	Login-IP-Host = shellbox.ispdomain.com
-
-# #
-# # Last default: shell on the local terminal server.
-# #
-# DEFAULT
-# 	Service-Type = Administrative-User
-
-# On no match, the user is denied access.
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy b/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy
deleted file mode 100644
index 9c848fd..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#  Configuration file for the rlm_files module.
-#  Please see rlm_files(5) manpage for more information.
-#
-#  $Id: 7292e23ea51717ee5cb50c4b9b609e91ebe4a41c $
-#
-#  This file is similar to the "users" file.  The check items
-#  are compared against the request, but the "reply" items are
-#  used to update the proxied packet, not the reply to the NAS.
-#
-#  You can use this file to re-write requests which are about to
-#  be sent to a home server.
-#
-
-#
-#  Requests destinated to realm "extisp" are sent to a RADIUS
-#  home server hosted by an other company which doesn't know about
-#  the IP addresses of our NASes. Therefore we replace the value of
-#  the NAS-IP-Address attribute by a unique value we communicated
-#  to them.
-#
-#DEFAULT Realm == "extisp"
-#	NAS-IP-Address := 10.1.2.3
-
-#
-#  For all proxied packets, set the User-Name in the proxied packet
-#  to the Stripped-User-Name, if it exists.  If not, set it to the
-#  User-Name from the original request.
-#
-#DEFAULT
-#	User-Name := `%{%{Stripped-User-Name}:-%{User-Name}}`
diff --git a/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl b/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl
deleted file mode 100644
index ac95aca..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl
+++ /dev/null
@@ -1,206 +0,0 @@
-
-#
-#  This program is free software; you can redistribute it and/or modify
-#  it under the terms of the GNU General Public License as published by
-#  the Free Software Foundation; either version 2 of the License, or
-#  (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software
-#  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
-#
-#  Copyright 2002  The FreeRADIUS server project
-#  Copyright 2002  Boian Jordanov <bjordanov@orbitel.bg>
-#
-
-#
-# Example code for use with rlm_perl
-#
-# You can use every module that comes with your perl distribution!
-#
-# If you are using DBI and do some queries to DB, please be sure to
-# use the CLONE function to initialize the DBI connection to DB.
-#
-
-use strict;
-use warnings;
-
-# use ...
-use Data::Dumper;
-
-# Bring the global hashes into the package scope
-our (%RAD_REQUEST, %RAD_REPLY, %RAD_CHECK);
-
-# This is hash wich hold original request from radius
-#my %RAD_REQUEST;
-# In this hash you add values that will be returned to NAS.
-#my %RAD_REPLY;
-#This is for check items
-#my %RAD_CHECK;
-# This is configuration items from "config" perl module configuration section
-#my %RAD_PERLCONF;
-
-#
-# This the remapping of return values
-#
-use constant {
-	RLM_MODULE_REJECT   => 0, # immediately reject the request
-	RLM_MODULE_OK       => 2, # the module is OK, continue
-	RLM_MODULE_HANDLED  => 3, # the module handled the request, so stop
-	RLM_MODULE_INVALID  => 4, # the module considers the request invalid
-	RLM_MODULE_USERLOCK => 5, # reject the request (user is locked out)
-	RLM_MODULE_NOTFOUND => 6, # user not found
-	RLM_MODULE_NOOP     => 7, # module succeeded without doing anything
-	RLM_MODULE_UPDATED  => 8, # OK (pairs modified)
-	RLM_MODULE_NUMCODES => 9  # How many return codes there are
-};
-
-# Same as src/include/radiusd.h
-use constant	L_DBG=>   1;
-use constant	L_AUTH=>  2;
-use constant	L_INFO=>  3;
-use constant	L_ERR=>   4;
-use constant	L_PROXY=> 5;
-use constant	L_ACCT=>  6;
-
-#  Global variables can persist across different calls to the module.
-#
-#
-#	{
-#	 my %static_global_hash = ();
-#
-#		sub post_auth {
-#		...
-#		}
-#		...
-#	}
-
-
-# Function to handle authorize
-sub authorize {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	# Here's where your authorization code comes
-	# You can call another function from here:
-	&test_call;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle authenticate
-sub authenticate {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	if ($RAD_REQUEST{'User-Name'} =~ /^baduser/i) {
-		# Reject user and tell him why
-		$RAD_REPLY{'Reply-Message'} = "Denied access by rlm_perl function";
-		return RLM_MODULE_REJECT;
-	} else {
-		# Accept user and set some attribute
-		$RAD_REPLY{'h323-credit-amount'} = "100";
-		return RLM_MODULE_OK;
-	}
-}
-
-# Function to handle preacct
-sub preacct {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle accounting
-sub accounting {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	# You can call another subroutine from here
-	&test_call;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle checksimul
-sub checksimul {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle pre_proxy
-sub pre_proxy {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle post_proxy
-sub post_proxy {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle post_auth
-sub post_auth {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	return RLM_MODULE_OK;
-}
-
-# Function to handle xlat
-sub xlat {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	# Loads some external perl and evaluate it
-	my ($filename,$a,$b,$c,$d) = @_;
-	&radiusd::radlog(L_DBG, "From xlat $filename ");
-	&radiusd::radlog(L_DBG,"From xlat $a $b $c $d ");
-	local *FH;
-	open FH, $filename or die "open '$filename' $!";
-	local($/) = undef;
-	my $sub = <FH>;
-	close FH;
-	my $eval = qq{ sub handler{ $sub;} };
-	eval $eval;
-	eval {main->handler;};
-}
-
-# Function to handle detach
-sub detach {
-	# For debugging purposes only
-#	&log_request_attributes;
-
-	# Do some logging.
-	&radiusd::radlog(L_DBG,"rlm_perl::Detaching. Reloading. Done.");
-}
-
-#
-# Some functions that can be called from other functions
-#
-
-sub test_call {
-	# Some code goes here
-}
-
-sub log_request_attributes {
-	# This shouldn't be done in production environments!
-	# This is only meant for debugging!
-	for (keys %RAD_REQUEST) {
-		&radiusd::radlog(L_DBG, "RAD_REQUEST: $_ = $RAD_REQUEST{$_}");
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints b/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints
deleted file mode 100644
index 87306ad..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints
+++ /dev/null
@@ -1,77 +0,0 @@
-# hints
-#
-#	The hints file.   This file is used to match
-#	a request, and then add attributes to it.  This
-#	process allows a user to login as "bob.ppp" (for example),
-#	and receive a PPP connection, even if the NAS doesn't
-#	ask for PPP.  The "hints" file is used to match the
-#	".ppp" portion of the username, and to add a set of
-#	"user requested PPP" attributes to the request.
-#
-#	Matching can take place with the the Prefix and Suffix
-#	attributes, just like in the "users" file.
-#	These attributes operate ONLY on the username, though.
-#
-#	Note that the attributes that are set for each
-#	entry are _NOT_ passed back to the terminal server.
-#	Instead they are added to the information that has
-#	been _SENT_ by the terminal server.
-#
-#	This extra information can be used in the users file to
-#	match on. Usually this is done in the DEFAULT entries,
-#	of which there can be more than one.
-#
-#	In addition a matching entry can transform a username
-#	for authentication purposes if the "Strip-User-Name"
-#	variable is set to Yes in an entry (default is Yes).
-#
-#	A special non-protocol name-value pair called "Hint"
-#	can be set to match on in the "users" file.
-#
-#	The following is how most ISPs want to set this up.
-#
-# Version:	$Id: f92ffb9f1e5bd0509b2e0e5e015001fda52bdfc3 $
-#
-
-
-DEFAULT	Suffix == ".ppp", Strip-User-Name = Yes
-	Hint = "PPP",
-	Service-Type = Framed-User,
-	Framed-Protocol = PPP
-
-DEFAULT	Suffix == ".slip", Strip-User-Name = Yes
-	Hint = "SLIP",
-	Service-Type = Framed-User,
-	Framed-Protocol = SLIP
-
-DEFAULT	Suffix == ".cslip", Strip-User-Name = Yes
-	Hint = "CSLIP",
-	Service-Type = Framed-User,
-	Framed-Protocol = SLIP,
-	Framed-Compression = Van-Jacobson-TCP-IP
-
-######################################################################
-#
-#	These entries are old, and commented out by default.
-#	They confuse too many people when "Peter" logs in, and the
-#	server thinks that the user "eter" is asking for PPP.
-#
-#DEFAULT	Prefix == "U", Strip-User-Name = No
-#	Hint = "UUCP"
-
-#DEFAULT	Prefix == "P", Strip-User-Name = Yes
-#	Hint = "PPP",
-#	Service-Type = Framed-User,
-#	Framed-Protocol = PPP
-
-#DEFAULT	Prefix == "S", Strip-User-Name = Yes
-#	Hint = "SLIP",
-#	Service-Type = Framed-User,
-#	Framed-Protocol = SLIP
-
-#DEFAULT	Prefix == "C", Strip-User-Name = Yes
-#	Hint = "CSLIP",
-#	Service-Type = Framed-User,
-#	Framed-Protocol = SLIP,
-#	Framed-Compression = Van-Jacobson-TCP-IP
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups b/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups
deleted file mode 100644
index a937c8b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# huntgroups	This file defines the `huntgroups' that you have. A
-#		huntgroup is defined by specifying the IP address of
-#		the NAS and possibly a port range. Port can be identified
-#		as just one port, or a range (from-to), and multiple ports
-#		or ranges of ports must be separated by a comma. For
-#		example: 1,2,3-8
-#
-#		Matching is done while RADIUS scans the user file; if it
-#		includes the selection criterium "Huntgroup-Name == XXX"
-#		the huntgroup is looked up in this file to see if it
-#		matches. There can be multiple definitions of the same
-#		huntgroup; the first one that matches will be used.
-#
-#		This file can also be used to define restricted access
-#		to certain huntgroups. The second and following lines
-#		define the access restrictions (based on username and
-#		UNIX usergroup) for the huntgroup.
-#
-
-#
-# Our POP in Alphen a/d Rijn has 3 terminal servers. Create a Huntgroup-Name
-# called Alphen that matches on all three terminal servers.
-#
-#alphen		NAS-IP-Address == 192.0.2.5
-#alphen		NAS-IP-Address == 192.0.2.6
-#alphen		NAS-IP-Address == 192.0.2.7
-
-#
-# The POP in Delft consists of only one terminal server.
-#
-#delft		NAS-IP-Address == 198.51.100.5
-
-#
-# Ports 0-7 on the first terminal server in Alphen are connected to
-# a huntgroup that is for business users only. Note that only one
-# of the username or groupname has to match to get access (OR/OR).
-#
-# Note that this huntgroup is a subset of the "alphen" huntgroup.
-#
-#business	NAS-IP-Address == 198.51.100.5, NAS-Port-Id == 0-7
-#		User-Name = rogerl,
-#		User-Name = henks,
-#		Group = business,
-#		Group = staff
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/python/example.py b/src/test/setup/radius-config/freeradius/mods-config/python/example.py
deleted file mode 100755
index e4986c6..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/python/example.py
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#!/usr/bin/env python
-#
-# Definitions for RADIUS programs
-#
-# Copyright 2002 Miguel A.L. Paraz <mparaz@mparaz.com>
-#
-# This should only be used when testing modules.
-# Inside freeradius, the 'radiusd' Python module is created by the C module
-# and the definitions are automatically created.
-#
-# $Id: 02e9f237cc0df3d7be08413238e504b90bf59b1a $
-
-# from modules.h
-
-RLM_MODULE_REJECT = 0
-RLM_MODULE_FAIL = 1
-RLM_MODULE_OK = 2
-RLM_MODULE_HANDLED = 3
-RLM_MODULE_INVALID = 4
-RLM_MODULE_USERLOCK = 5
-RLM_MODULE_NOTFOUND = 6
-RLM_MODULE_NOOP = 7	
-RLM_MODULE_UPDATED = 8
-RLM_MODULE_NUMCODES = 9
-
-
-# from radiusd.h
-L_DBG = 1
-L_AUTH = 2
-L_INFO = 3
-L_ERR = 4
-L_PROXY	= 5
-L_CONS = 128
-
-OP={       '{':2,   '}':3,   '(':4,   ')':5,   ',':6,   ';':7,  '+=':8,  '-=':9,  ':=':10,
-  '=':11, '!=':12, '>=':13,  '>':14, '<=':15,  '<':16, '=~':17, '!~':18, '=*':19, '!*':20,
- '==':21 , '#':22 }
-
-OP_TRY = (':=', '+=', '-=', '=' )
-
-def resolve(*lines):
-    tuples = []
-    for line in lines:
-	for op in OP_TRY:
-	    arr = line.rsplit(op)
-	    if len(arr)==2:
-		tuples.append((str(arr[0].strip()),OP[op],str(arr[1].strip())))
-		break
-    return tuple(tuples)
-
-# log function
-def radlog(level, msg):
-    import sys
-    sys.stdout.write(msg + '\n')
-
-    level = level
-
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf
deleted file mode 100644
index 97c4661..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-#
-query = "\
-	SELECT SUM(acctsessiontime - GREATEST((%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
-	FROM radacct \
-	WHERE username = '%{${key}}' \
-	AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%b'"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE username = '%{${key}}' \
-#	AND acctstarttime > FROM_UNIXTIME('%b')"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE username = '%{${key}}' \
-#	AND acctstarttime BETWEEN FROM_UNIXTIME('%b') AND FROM_UNIXTIME('%e')"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf
deleted file mode 100644
index 97e1bc5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-query = "\
-	SELECT TIMESTAMPDIFF(SECOND, acctstarttime, NOW()) \
-	FROM radacct \
-	WHERE UserName='%{${key}}' \
-	ORDER BY acctstarttime \
-	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf
deleted file mode 100644
index 6d93d15..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-#
-query = "\
-	SELECT SUM(acctsessiontime - GREATEST((%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
-	FROM radacct \
-	WHERE username='%{${key}}' \
-	AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%b'"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct\
-#	WHERE username='%{${key}}' \
-#	AND acctstarttime > FROM_UNIXTIME('%b')"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE username='%{${key}}' \
-#	AND acctstarttime BETWEEN FROM_UNIXTIME('%b') \
-#	AND FROM_UNIXTIME('%e')"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf
deleted file mode 100644
index abcb21b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-query = "\
-	SELECT IFNULL(SUM(AcctSessionTime),0) \
-	FROM radacct \
-	WHERE UserName='%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf
deleted file mode 100644
index 64802bf..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-#
-query = "\
-	SELECT SUM(AcctSessionTime - GREATER((%b - AcctStartTime::ABSTIME::INT4), 0)) \
-	FROM radacct \
-	WHERE UserName='%{${key}}' \
-	AND AcctStartTime::ABSTIME::INT4 + AcctSessionTime > '%b'"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(AcctSessionTime) \
-#	FROM radacct \
-#	WHERE UserName='%{${key}}' \
-#	AND AcctStartTime::ABSTIME::INT4 > '%b'"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(AcctSessionTime) \
-#	FROM radacct \
-#	WHERE UserName='%{${key}}' \
-#	AND AcctStartTime::ABSTIME::INT4 BETWEEN '%b' \
-#	AND '%e'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf
deleted file mode 100644
index c4ce096..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-query = "\
-	SELECT TIME_TO_SEC(TIMEDIFF(NOW(), acctstarttime)) \
-	FROM radacct \
-	WHERE UserName='%{${key}}' \
-	ORDER BY acctstarttime \
-	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf
deleted file mode 100644
index eb831a4..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf
+++ /dev/null
@@ -1,31 +0,0 @@
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-query = "\
-	SELECT SUM(AcctSessionTime - GREATER((%b - AcctStartTime::ABSTIME::INT4), 0)) \
-	FROM radacct \
-	WHERE UserName='%{${key}}' \
-	AND AcctStartTime::ABSTIME::INT4 + AcctSessionTime > '%b'"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(AcctSessionTime) \
-#	FROM radacct \
-#	WHERE UserName='%{${key}}' \
-#	AND AcctStartTime::ABSTIME::INT4 > '%b'"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(AcctSessionTime) \
-#	FROM radacct \
-#	WHERE UserName='%{${key}}' \
-#	AND AcctStartTime::ABSTIME::INT4 BETWEEN '%b' AND '%e'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf
deleted file mode 100644
index ac5182e..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-query = "\
-	SELECT SUM(AcctSessionTime) \
-	FROM radacct \
-	WHERE UserName='%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf
deleted file mode 100644
index 6befdcc..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-#
-query = "\
-	SELECT SUM(acctsessiontime - GREATEST((%b - strftime('%%s', acctstarttime)), 0)) \
-	FROM radacct \
-	WHERE username = '%{${key}}' \
-	AND (strftime('%%s', acctstarttime) + acctsessiontime) > %b"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE \username = '%{${key}}' \
-#	AND acctstarttime > %b"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) FROM radacct \
-#	WHERE username = '%{${key}}' \
-#	AND acctstarttime BETWEEN %b \
-#	AND %e"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf
deleted file mode 100644
index f4e95a5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-query = "\
-	SELECT GREATEST(strftime('%%s', NOW()) - strftime('%%s', acctstarttime), 0) AS expires \
-	FROM radacct \
-	WHERE username = '%{${key}}' \
-	ORDER BY acctstarttime \
-	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf
deleted file mode 100644
index 5bb8140..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#  This query properly handles calls that span from the
-#  previous reset period into the current period but
-#  involves more work for the SQL server than those
-#  below
-#
-query = "\
-	SELECT SUM(acctsessiontime - GREATEST((%b - strftime('%%s', acctstarttime)), 0)) \
-	FROM radacct \
-	WHERE username = '%{${key}}' AND \
-	(strftime('%%s', acctstarttime) + acctsessiontime) > %b"
-
-#
-#  This query ignores calls that started in a previous
-#  reset period and continue into into this one. But it
-#  is a little easier on the SQL server
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE username = '%{${key}}' \
-#	AND acctstarttime > %b"
-
-#
-#  This query is the same as above, but demonstrates an
-#  additional counter parameter '%e' which is the
-#  timestamp for the end of the period
-#
-#query = "\
-#	SELECT SUM(acctsessiontime) \
-#	FROM radacct \
-#	WHERE username = '%{${key}}' \
-#	AND acctstarttime BETWEEN %b \
-#	AND %e"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf
deleted file mode 100644
index ac2d869..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-query = "\
-	SELECT IFNULL(SUM(acctsessiontime),0) \
-	FROM radacct \
-	WHERE username = '%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf
deleted file mode 100644
index 415c416..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- text -*-
-#
-#  cui/mysql/queries.conf -- Queries to update a MySQL CUI table.
-#
-#  $Id: f8f18cab562e7321756cd1f3411bbc9897ef3377 $
-
-post-auth {
-	query = "\
-		INSERT IGNORE INTO ${..cui_table} \
-			(clientipaddress, callingstationid, username, cui, lastaccounting) \
-		VALUES \
-			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
-			'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL) \
-		ON DUPLICATE KEY UPDATE \
-			lastaccounting='0000-00-00 00:00:00', \
-			cui='%{reply:Chargeable-User-Identity}'"
-
-}
-
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-	type {
-		start {
-			query = "\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = CURRENT_TIMESTAMP \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		interim-update {
-			query ="\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = CURRENT_TIMESTAMP \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		stop {
-			query ="\
-				DELETE FROM ${....cui_table} \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql
deleted file mode 100644
index 5d9804d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE `cui` (
-  `clientipaddress` varchar(46) NOT NULL default '',
-  `callingstationid` varchar(50) NOT NULL default '',
-  `username` varchar(64) NOT NULL default '',
-  `cui` varchar(32) NOT NULL default '',
-  `creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
-  `lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
-  PRIMARY KEY  (`username`,`clientipaddress`,`callingstationid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf
deleted file mode 100644
index 0e985b3..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- text -*-
-#
-#  cui/postgresql/queries.conf -- Queries to update a PostgreSQL CUI table.
-#
-#  $Id: 6c2215f0abbe5cb30658ea541d525fd7a274c547 $
-
-post-auth {
-	query = "\
-		INSERT INTO ${..cui_table} \
-			(clientipaddress, callingstationid, username, cui) \
-		VALUES \
-			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
-			'%{User-Name}', '%{reply:Chargeable-User-Identity}')"
-
-}
-
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-	type {
-		start {
-			query = "\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = now() \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		interim-update {
-			query ="\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = now() \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		stop {
-			query ="\
-				DELETE FROM ${....cui_table} \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql
deleted file mode 100644
index abda3ae..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE cui (
-  clientipaddress INET NOT NULL DEFAULT '0.0.0.0',
-  callingstationid varchar(50) NOT NULL DEFAULT '',
-  username varchar(64) NOT NULL DEFAULT '',
-  cui varchar(32) NOT NULL DEFAULT '',
-  creationdate TIMESTAMP with time zone NOT NULL default 'now()',
-  lastaccounting TIMESTAMP with time zone NOT NULL default '-infinity'::timestamp,
-  PRIMARY KEY  (username, clientipaddress, callingstationid)
-);
-
-CREATE RULE postauth_query AS ON INSERT TO cui
-	WHERE EXISTS(SELECT 1 FROM cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid))
-	DO INSTEAD UPDATE cui SET lastaccounting ='-infinity'::timestamp with time zone, cui=NEW.cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid);
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf
deleted file mode 100644
index defc591..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- text -*-
-#
-#  cui/sqlite/queries.conf -- Queries to update a sqlite CUI table.
-#
-#  $Id: 41741eb70ae9c428ba5230aaf9d9b84f95c050a9 $
-
-post-auth {
-	query = "\
-		INSERT OR REPLACE INTO ${..cui_table} \
-			(clientipaddress, callingstationid, username, cui, lastaccounting) \
-		VALUES \
-			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
-			'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL)"
-
-}
-
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-	type {
-		start {
-			query = "\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = CURRENT_TIMESTAMP \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		interim-update {
-			query ="\
-				UPDATE ${....cui_table} SET \
-					lastaccounting = CURRENT_TIMESTAMP \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-		stop {
-			query ="\
-				DELETE FROM ${....cui_table} \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}' \
-				AND cui = '%{Chargeable-User-Identity}'"
-		}
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql
deleted file mode 100644
index 7181d01..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE `cui` (
-  `clientipaddress` varchar(46) NOT NULL default '',
-  `callingstationid` varchar(50) NOT NULL default '',
-  `username` varchar(64) NOT NULL default '',
-  `cui` varchar(32) NOT NULL default '',
-  `creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
-  `lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
-  PRIMARY KEY  (`username`,`clientipaddress`,`callingstationid`)
-);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf
deleted file mode 100644
index eb1a79d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf
+++ /dev/null
@@ -1,161 +0,0 @@
-# -*- text -*-
-#
-#  ippool-dhcp/mysql/queries.conf -- MySQL queries for rlm_sqlippool
-#
-#  $Id: ac9476e5091010d39c9212e424efb360a99a1e71 $
-
-#
-# This series of queries allocates an IP address
-#
-#allocate_clear = "\
-#	UPDATE ${ippool_table} \
-#	SET \
-#		nasipaddress = '', \
-#		pool_key = 0, \
-#		callingstationid = '', \
-#		username = '', \
-#		expiry_time = NULL \
-#	WHERE pool_key = '${pool_key}'"
-
-#
-#  This series of queries allocates an IP address
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE expiry_time <= NOW() - INTERVAL 1 SECOND \
-	AND nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  which user had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress \
-	FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND (expiry_time < NOW() OR expiry_time IS NULL) \
-	ORDER BY \
-		(username <> '%{User-Name}'), \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	LIMIT 1 \
-	OR UPDATE"
-
-#
-#  If you prefer to allocate a random IP address every time, use this query instead
-#
-#allocate_find = "\
-#	SELECT framedipaddress \
-#	FROM ${ippool_table} \
-#	WHERE pool_name = '%{control:Pool-Name}' \
-#	AND expiry_time IS NULL \
-#	ORDER BY RAND() \
-#	LIMIT 1 \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see if the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be
-#  commented out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM ${ippool_table} \
-	WHERE pool_name='%{control:Pool-Name}' \
-	LIMIT 1"
-
-#
-#  This is the final IP Allocation query, which saves the allocated ip details
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{User-Name}', \
-		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE framedipaddress = '%I' AND expiry_time IS NULL"
-
-#
-#  This series of queries frees an IP number when an accounting
-#  START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting
-#  STOP record arrives
-#
-stop_clear = "UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting
-#  ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting ON record arrives
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting OFF record arrives
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf
deleted file mode 100644
index 673547b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- text -*-
-#
-#  ippool-dhcp/oracle/queries.conf -- Oracle queries for dhcp-ippool
-#
-#  $id: 416d59802a1321c16b936bb5e63c288ca3634bcd $
-
-#
-#  "START TRANSACTION" not required with Oracle
-#
-allocate_begin = ""
-start_begin = ""
-alive_begin = ""
-stop_begin = ""
-on_begin = ""
-off_begin = ""
-
-#
-#  This query allocates an IP address from the Pool
-#  It query tries to allocate to the user
-#  either the same IP-address that they had last session
-#  or the IP which has been unused for the longest period of time
-#
-allocate_find = "\
-	WITH POOLS AS (\
-		SELECT * \
-		FROM ${ippool_table} \
-		WHERE pool_name = '%{control:Pool-Name}' \
-		AND (\
-			pool_key = '${pool_key}' \
-			OR expiry_time = (\
-				SELECT MIN(expiry_time) \
-				FROM ${ippool_table} \
-				WHERE pool_name = '%{control:Pool-Name}' \
-				AND expiry_time < CURRENT_TIMESTAMP AND pool_key != '${pool_key}'\
-			)\
-		)\
-	) \
-	SELECT framedipaddress \
-	FROM (\
-		SELECT framedipaddress \
-		FROM POOLS \
-		WHERE pool_key = '${pool_key}' \
-		OR (\
-			NOT EXISTS (\
-				SELECT 1 \
-				FROM POOLS \
-				WHERE pool_key = '${pool_key}'\
-			)\
-		)\
-	) WHERE ROWNUM = 1 FOR UPDATE"
-
-#
-#  This function is available if you want to use multiple pools
-#
-#allocate_find = "\
-	SELECT msqlippool('%{SQL-User-Name}','%{control:Pool-Name}') \
-	FROM dual"
-
-#
-#  If you prefer to allocate a random IP address every time, use this query instead
-#
-#allocate_find = "\
-#	SELECT framedipaddress \
-#	FROM ${ippool_table}\
-#	WHERE framedipaddress = (\
-#		SELECT framedipaddress \
-#		FROM (\
-#			SELECT framedipaddress \
-#			FROM ${ippool_table} \
-#			WHERE pool_name = '%{control:Pool-Name}' \
-#			AND expiry_time < CURRENT_TIMESTAMP \
-#			ORDER BY DBMS_RANDOM.VALUE\
-#		) \
-#		WHERE ROWNUM = 1\
-#	) \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see whether the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be commented
-#  out to save running this query every time an ip is not allocated.
-#
-#pool_check = "\
-#	SELECT id \
-#	FROM (\
-#		SELECT id \
-#		FROM ${ippool_table} \
-#		WHERE pool_name = '%{control:Pool-Name}'\
-#	) WHERE ROWNUM = 1"
-
-#
-#  This query marks the IP address handed out by "allocate_find" as used
-#  for the period of "lease_duration" after which time it may be reused.
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-id}', \
-		username = '%{SQL-User-Name}', \
-		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
-	WHERE framedipaddress = '%I'"
-
-#
-#  This query frees the IP address assigned to "pool_key" when a new request
-#  comes in for the same "pool_key". This means that either you are losing
-#  accounting Stop records or you use Calling-Station-id instead of NAS-Port
-#  as your "pool_key" and your users are able to reconnect before your NAS
-#  has timed out their previous session. (Generally on wireless networks)
-#  (Note: If your pool_key is set to Calling-Station-id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{NAS-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
-	WHERE pool_key = '${pool_key}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_name = '%{control:Pool-Name}' \
-	AND pool_key = '${pool_key}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This query frees an IP address when an accounting
-#  STOP record arrives
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
-	WHERE pool_key = '${pool_key}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
-	WHERE pool_key = '${pool_key}' \
-	AND pool_name = '%{control:Pool-Name}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting ON record arrives from that NAS
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
-	WHERE nasipaddress = '%{NAS-IP-Address}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting OFF record arrives from that NAS
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
-	WHERE nasipaddress = '%{NAS-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql
deleted file mode 100644
index 8289e0f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE radippool (
-	id                      INT PRIMARY KEY,
-	pool_name               VARCHAR(30) NOT NULL,
-	framedipaddress         VARCHAR(30) NOT NULL,
-	nasipaddress            VARCHAR(30) NOT NULL,
-	pool_key                VARCHAR(64) NOT NULL,
-	calledstationid         VARCHAR(64),
-	callingstationid        VARCHAR(64) NOT NULL,
-	expiry_time             TIMESTAMP(0) NOT NULL,
-	username                VARCHAR(100)
-);
-
-CREATE INDEX radippool_poolname_ipaddr ON radippool (pool_name, framedipaddress);
-CREATE INDEX radippool_poolname_expire ON radippool (pool_name, expiry_time);
-CREATE INDEX radippool_nasipaddr_key ON radippool (nasipaddress, pool_key);
-CREATE INDEX radippool_nasipaddr_calling ON radippool (nasipaddress, callingstationid);
-
-CREATE SEQUENCE radippool_seq START WITH 1 INCREMENT BY 1;
-
-CREATE OR REPLACE TRIGGER radippool_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radippool
-	FOR EACH ROW
-	BEGIN
-		IF ( :NEW.id = 0 OR :NEW.id IS NULL ) THEN
-			SELECT radippool_seq.NEXTVAL INTO :NEW.id FROM dual;
-		END IF;
-	END;
-/
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf
deleted file mode 100644
index 124f349..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf
+++ /dev/null
@@ -1,165 +0,0 @@
-# -*- text -*-
-#
-#  ippool-dhcp/sqlite/queries.conf -- SQLite queries for rlm_sqlippool
-#
-#  $Id: 8709a562ee8877f02a43118464371eae101f3fbc $
-
-#
-#  This series of queries allocates an IP address
-#
-#allocate_clear = "\
-#	UPDATE ${ippool_table} \
-#	SET \
-#		nasipaddress = '', \
-#		pool_key = 0, \
-#		callingstationid = '', \
-#		username = '', \
-#		expiry_time = NULL \
-#	WHERE pool_key = '${pool_key}'"
-
-#
-#  This series of queries allocates an IP address
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE expiry_time <= datetime(strftime('%%s', 'now') - 1, 'unixepoch') \
-	AND nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  which user had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress \
-	FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND (\
-		((expiry_time < datetime('now')) OR expiry_time IS NULL) \
-		OR (callingstationid = '%{Calling-Station-Id}') \
-		AND expiry_time > datetime('now')\
-	) \
-	ORDER BY \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	LIMIT 1"
-
-#
-# If you prefer to allocate a random IP address every time, use this query instead
-#
-#allocate_find = "\
-#	SELECT framedipaddress FROM ${ippool_table} \
-#	WHERE pool_name = '%{control:Pool-Name}' \
-#	AND expiry_time IS NULL \
-#	ORDER BY RAND() \
-#	LIMIT 1 \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see if the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be
-#  commented out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM ${ippool_table} \
-	WHERE pool_name='%{control:Pool-Name}' \
-	LIMIT 1"
-
-#
-#  This is the final IP Allocation query, which saves the allocated ip details
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{User-Name}', \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE framedipaddress = '%I' \
-	AND expiry_time IS NULL"
-
-#
-#  The following queries are not used for DHCP IP assignment.
-#
-
-#
-#  This series of queries frees an IP number when an accounting START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting STOP record arrives
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting ON record arrives
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE \nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting OFF record arrives
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql
deleted file mode 100644
index 22b2809..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE radippool (
-	id                      int PRIMARY KEY,
-	pool_name               varchar(30) NOT NULL,
-	framedipaddress         varchar(30) NOT NULL,
-	nasipaddress            varchar(30) NOT NULL DEFAULT '',
-	pool_key                varchar(64) NOT NULL DEFAULT '',
-	calledstationid         varchar(64),
-	callingstationid        varchar(64) NOT NULL DEFAULT '',
-	expiry_time             timestamp DEFAULT NULL,
-	username                varchar(100)
-);
- 
--- Example of how to put IPs in the pool
--- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (1, 'local', '192.168.5.10');
--- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (2, 'local', '192.168.5.11');
--- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (3, 'local', '192.168.5.12');
--- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (4, 'local', '192.168.5.13');
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf
deleted file mode 100644
index e17c513..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf
+++ /dev/null
@@ -1,157 +0,0 @@
-# -*- text -*-
-#
-#  ippool/mysql/queries.conf -- MySQL queries for rlm_sqlippool
-#
-#  $Id: ecdb8beda2fe841c07f513f3a6be9e535f73875b $
-
-#
-#  This series of queries allocates an IP address
-#
-#allocate_clear = "\
-#	UPDATE ${ippool_table} \
-#	SET \
-#		nasipaddress = '', \
-#		pool_key = 0, \
-#		callingstationid = '', \
-#		username = '', \
-#		expiry_time = NULL \
-#	WHERE pool_key = '${pool_key}'"
-
-#
-#  This series of queries allocates an IP address
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE expiry_time <= NOW() - INTERVAL 1 SECOND \
-	AND nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  which user had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND (expiry_time < NOW() OR expiry_time IS NULL) \
-	ORDER BY \
-		(username <> '%{User-Name}'), \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	LIMIT 1 \
-	FOR UPDATE"
-
-#
-#  If you prefer to allocate a random IP address every time, use this query instead.
-#
-#allocate_find = "\
-#	SELECT framedipaddress FROM ${ippool_table} \
-#	WHERE pool_name = '%{control:Pool-Name}' \
-#	AND expiry_time IS NULL \
-#	ORDER BY \
-#		RAND() \
-#	LIMIT 1 \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see if the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be
-#  commented out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM ${ippool_table} \
-	WHERE pool_name='%{control:Pool-Name}' \
-	LIMIT 1"
-
-#
-#  This is the final IP Allocation query, which saves the allocated ip details.
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{User-Name}', expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE framedipaddress = '%I' \
-	AND expiry_time IS NULL"
-
-#
-#  This series of queries frees an IP number when an accounting START record arrives.
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting STOP record arrives.
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting ALIVE record arrives.
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting ON record arrives
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting OFF record arrives
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql
deleted file mode 100644
index 40626d0..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Table structure for table 'radippool'
-#
-CREATE TABLE radippool (
-  id                    int(11) unsigned NOT NULL auto_increment,
-  pool_name             varchar(30) NOT NULL,
-  framedipaddress       varchar(15) NOT NULL default '',
-  nasipaddress          varchar(15) NOT NULL default '',
-  calledstationid       VARCHAR(30) NOT NULL,
-  callingstationid      VARCHAR(30) NOT NULL,
-  expiry_time           DATETIME NULL default NULL,
-  username              varchar(64) NOT NULL default '',
-  pool_key              varchar(30) NOT NULL,
-  PRIMARY KEY (id),
-  KEY radippool_poolname_expire (pool_name, expiry_time),
-  KEY framedipaddress (framedipaddress),
-  KEY radippool_nasip_poolkey_ipaddress (nasipaddress, pool_key, framedipaddress)
-) ENGINE=InnoDB;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql
deleted file mode 100644
index a96fc56..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql
+++ /dev/null
@@ -1,57 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE OR REPLACE FUNCTION msqlippool(user varchar2, pool varchar2)
-RETURN varchar2 IS
-
-	PRAGMA AUTONOMOUS_TRANSACTION;
-	ip_temp varchar2(20);
-BEGIN
-
-    -- If the user's pool is dynamic, get an ipaddress (oldest one) from the corresponding pool
-
-    if pool = 'Dynamic' then
-	select framedipaddress into ip_temp from (select framedipaddress from radippool where expiry_time < current_timestamp and pool_name = pool ORDER BY expiry_time) where rownum = 1;
-	return (ip_temp);
-
-    -- Else, then get the static ipaddress for that user from the corresponding pool
-
-    else
-	select framedipaddress into ip_temp from radippool where username = user and pool_name = pool;
-	return (ip_temp);
-    end if;
-
-exception
-
- -- This block is executed if there's no free ipaddresses or no static ip assigned to the user
-
- when NO_DATA_FOUND then
-	if pool = 'Dynamic' then
-		return(''); -- so sqlippool can log it on radius.log
-	end if;
-
-	-- Else, grabs a free IP from the static pool and saves it in radippool so the user will always get the same IP the next time
-
-	select framedipaddress into ip_temp from (select framedipaddress from radippool where expiry_time < current_timestamp and username is null and pool_name = pool) where rownum = 1;
-	UPDATE radippool SET username = user where framedipaddress = ip_temp;
-	commit;
-	return (ip_temp);
-
- when others
-  then return('Oracle Exception');
-
-END;
-/
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf
deleted file mode 100644
index 686f92a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf
+++ /dev/null
@@ -1,162 +0,0 @@
-# -*- text -*-
-#
-#  ippool/oracle/queries.conf -- Oracle queries for rlm_sqlippool
-#
-#  $Id: 06d37f8985f3da1ac36276bdc9ca9c15a42d4059 $
-
-allocate_begin = "commit"
-start_begin = "commit"
-alive_begin = "commit"
-stop_begin = "commit"
-on_begin = "commit"
-off_begin = "commit"
-
-#
-#  This query allocates an IP address from the Pool
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  to the user that they had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress \
-	FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND expiry_time < current_timestamp \
-	AND rownum <= 1 \
-	ORDER BY \
-		(username <> '%{SQL-User-Name}'), \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	FOR UPDATE"
-
-#
-#  This function is available if you want to use multiple pools
-#
-#allocate_find = "\
-#	SELECT msqlippool('%{SQL-User-Name}','%{control:Pool-Name}') \
-#	FROM dual"
-
-#
-#  If you prefer to allocate a random IP address every time, use this query instead
-#
-#allocate_find = "\
-#	SELECT framedipaddress \
-#	FROM ${ippool_table} \
-#	WHERE pool_name = '%{control:Pool-Name}' \
-#	AND expiry_time < current_timestamp \
-#	AND rownum <= 1 \
-#	ORDER BY RANDOM() \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see whether the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be commented
-#  out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM (\
-		SELECT id \
-		FROM ${ippool_table} \
-		WHERE pool_name='%{control:Pool-Name}'\
-	) \
-	WHERE ROWNUM = 1"
-
-#
-#  This query marks the IP address handed out by "allocate-find" as used
-#  for the period of "lease_duration" after which time it may be reused.
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{SQL-User-Name}', \
-		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
-	WHERE framedipaddress = '%I'"
-
-#
-#  This query frees the IP address assigned to "pool_key" when a new request
-#  comes in for the same "pool_key". This means that either you are losing
-#  accounting Stop records or you use Calling-Station-Id instead of NAS-Port
-#  as your "pool_key" and your users are able to reconnect before your NAS
-#  has timed out their previous session. (Generally on wireless networks)
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = current_timestamp - INTERVAL '1' second(1) \
-	WHERE pool_key = '${pool_key}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}'"
-
-#
-#  This query frees an IP address when an accounting STOP record arrives
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = current_timestamp - INTERVAL '1' second(1) \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{SQL-User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND framedipaddress = '%{Framed-IP-Address}' \
-	AND username = '%{SQL-User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting ON record arrives from that NAS
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = current_timestamp - INTERVAL '1' second(1) \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting OFF record arrives from that NAS
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = current_timestamp - INTERVAL '1' second(1) \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql
deleted file mode 100644
index ce3c343..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-CREATE TABLE radippool (
-	id                      INT PRIMARY KEY,
-	pool_name               VARCHAR(30) NOT NULL,
-	framedipaddress         VARCHAR(30) NOT NULL,
-	nasipaddress            VARCHAR(30) NOT NULL,
-	pool_key                INT NOT NULL,
-	CalledStationId         VARCHAR(64),
-	CallingStationId        VARCHAR(64) NOT NULL,
-	expiry_time             timestamp(0) NOT NULL,
-	username                VARCHAR(100)
-);
-
-CREATE INDEX radippool_poolname_ipaadr ON radippool (pool_name, framedipaddress);
-CREATE INDEX radippool_poolname_expire ON radippool (pool_name, expiry_time);
-CREATE INDEX radippool_nasipaddr_key ON radippool (nasipaddress, pool_key);
-CREATE INDEX radippool_nasipaddr_calling ON radippool (nasipaddress, callingstationid);
-
-CREATE SEQUENCE radippool_seq START WITH 1 INCREMENT BY 1;
-
-CREATE OR REPLACE TRIGGER radippool_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radippool
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radippool_seq.nextval into :new.id from dual;
-		end if;
-	END;
-/
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf
deleted file mode 100644
index d286cf6..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- text -*-
-#
-#  ippool/postgresql/queries.conf -- PostgreSQL queries for rlm_sqlippool
-#
-#  $Id: 38465e829f61efab50f565dc349ef64b29052f21 $
-
-#
-#  This query allocates an IP address from the Pool
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  to the user that they had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress \
-	FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND expiry_time < 'now'::timestamp(0) \
-	ORDER BY \
-		(username <> '%{SQL-User-Name}'), \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	LIMIT 1 \
-	FOR UPDATE"
-
-#
-#  If you prefer to allocate a random IP address every time, use this query instead
-#
-allocate_find = "\
-	SELECT framedipaddress FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' AND expiry_time < 'now'::timestamp(0) \
-	ORDER BY RANDOM() \
-	LIMIT 1 \
-	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see whether the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be commented
-#  out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM ${ippool_table} \
-	WHERE pool_name='%{control:Pool-Name}' \
-	LIMIT 1"
-
-#
-#  This query marks the IP address handed out by "allocate-find" as used
-#  for the period of "lease_duration" after which time it may be reused.
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{SQL-User-Name}', \
-		expiry_time = 'now'::timestamp(0) + '${lease_duration} second'::interval \
-	WHERE framedipaddress = '%I'"
-
-#
-#  This query frees the IP address assigned to "pool_key" when a new request
-#  comes in for the same "pool_key". This means that either you are losing
-#  accounting Stop records or you use Calling-Station-Id instead of NAS-Port
-#  as your "pool_key" and your users are able to reconnect before your NAS
-#  has timed out their previous session. (Generally on wireless networks)
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = 'now'::timestamp(0) + '${lease_duration} second'::interval \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}'"
-
-#
-#  This query frees an IP address when an accounting
-#  STOP record arrives
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{SQL-User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This query extends an IP address lease by "lease_duration" when an accounting
-#  ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = 'now'::timestamp(0) + '${lease_duration} seconds'::interval \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND framedipaddress = '%{Framed-IP-Address}' \
-	AND username = '%{SQL-User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting ON record arrives from that NAS
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This query frees all IP addresses allocated to a NAS when an
-#  accounting OFF record arrives from that NAS
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql
deleted file mode 100644
index c5e5ae1..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql
+++ /dev/null
@@ -1,35 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
---
--- Table structure for table 'radippool'
---
-
-CREATE TABLE radippool (
-	id			BIGSERIAL PRIMARY KEY,
-	pool_name		varchar(64) NOT NULL,
-	FramedIPAddress		INET NOT NULL,
-	NASIPAddress		VARCHAR(16) NOT NULL default '',
-	pool_key		VARCHAR(64) NOT NULL default 0,
-	CalledStationId		VARCHAR(64),
-	CallingStationId	text NOT NULL default ''::text,
-	expiry_time		TIMESTAMP(0) without time zone NOT NULL default 'now'::timestamp(0),
-	username		text DEFAULT ''::text
-);
-
-CREATE INDEX radippool_poolname_expire ON radippool USING btree (pool_name, expiry_time);
-CREATE INDEX radippool_framedipaddress ON radippool USING btree (framedipaddress);
-CREATE INDEX radippool_nasip_poolkey_ipaddress ON radippool USING btree (nasipaddress, pool_key, framedipaddress);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf
deleted file mode 100644
index fc8fa9f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf
+++ /dev/null
@@ -1,162 +0,0 @@
-# -*- text -*-
-#
-#  ippool/sqlite/queries.conf -- SQLite queries for rlm_sqlippool
-#
-#  $Id: e912bd32a7485f6a505dbb67ad6f54138845cdee $
-
-#
-#  This series of queries allocates an IP address
-#
-#allocate_clear = "\
-#	UPDATE ${ippool_table} \
-#	SET \
-#		nasipaddress = '', pool_key = 0, \
-#		callingstationid = '', username = '', \
-#		expiry_time = NULL \
-#	WHERE pool_key = '${pool_key}'"
-
-#
-#  This series of queries allocates an IP address
-#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
-#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
-#  from the WHERE clause)
-#
-allocate_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE expiry_time <= datetime(strftime('%%s', 'now') - 1, 'unixepoch') \
-	AND nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  The ORDER BY clause of this query tries to allocate the same IP-address
-#  which user had last session...
-#
-allocate_find = "\
-	SELECT framedipaddress \
-	FROM ${ippool_table} \
-	WHERE pool_name = '%{control:Pool-Name}' \
-	AND (expiry_time < datetime('now') OR expiry_time IS NULL) \
-	ORDER BY \
-		(username <> '%{User-Name}'), \
-		(callingstationid <> '%{Calling-Station-Id}'), \
-		expiry_time \
-	LIMIT 1 \
-	FOR UPDATE"
-
-#
-#   If you prefer to allocate a random IP address every time, i
-#   use this query instead
-#
-
-#allocate_find = "\
-#	SELECT framedipaddress \
-#	FROM ${ippool_table} \
-# 	WHERE pool_name = '%{control:Pool-Name}' \
-#	AND expiry_time IS NULL \
-#	ORDER BY RAND() \
-# 	LIMIT 1 \
-#	FOR UPDATE"
-
-#
-#  If an IP could not be allocated, check to see if the pool exists or not
-#  This allows the module to differentiate between a full pool and no pool
-#  Note: If you are not running redundant pool modules this query may be
-#  commented out to save running this query every time an ip is not allocated.
-#
-pool_check = "\
-	SELECT id \
-	FROM ${ippool_table} \
-	WHERE pool_name='%{control:Pool-Name}' \
-	LIMIT 1"
-
-#
-#  This is the final IP Allocation query, which saves the allocated ip details
-#
-allocate_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '%{NAS-IP-Address}', \
-		pool_key = '${pool_key}', \
-		callingstationid = '%{Calling-Station-Id}', \
-		username = '%{User-Name}', \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE framedipaddress = '%I' \
-	AND expiry_time IS NULL"
-
-#
-#  This series of queries frees an IP number when an accounting START record arrives
-#
-start_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE nasipaddress = '%{NAS-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting STOP record arrives
-#
-stop_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees an IP number when an accounting
-#  ALIVE record arrives
-#
-alive_update = "\
-	UPDATE ${ippool_table} \
-	SET \
-		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
-	WHERE nasipaddress = '%{Nas-IP-Address}' \
-	AND pool_key = '${pool_key}' \
-	AND username = '%{User-Name}' \
-	AND callingstationid = '%{Calling-Station-Id}' \
-	AND framedipaddress = '%{Framed-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting ON record arrives
-#
-on_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
-#
-#  This series of queries frees the IP numbers allocate to a
-#  NAS when an accounting OFF record arrives
-#
-off_clear = "\
-	UPDATE ${ippool_table} \
-	SET \
-		nasipaddress = '', \
-		pool_key = 0, \
-		callingstationid = '', \
-		username = '', \
-		expiry_time = NULL \
-	WHERE nasipaddress = '%{Nas-IP-Address}'"
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql
deleted file mode 100644
index 86f9121..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
---
--- Table structure for table 'radippool'
---
-CREATE TABLE  (
-  id                    int(11) PRIMARY KEY,
-  pool_name             varchar(30) NOT NULL,
-  framedipaddress       varchar(15) NOT NULL default '',
-  nasipaddress          varchar(15) NOT NULL default '',
-  calledstationid       VARCHAR(30) NOT NULL,
-  callingstationid      VARCHAR(30) NOT NULL,
-  expiry_time           DATETIME NULL default NULL,
-  username              varchar(64) NOT NULL default '',
-  pool_key              varchar(30) NOT NULL
-);
-
-CREATE INDEX radippool_poolname_expire ON radippool(pool_name, expiry_time);
-CREATE INDEX radippool_framedipaddress ON radippool(framedipaddress);
-CREATE INDEX radippool_nasip_poolkey_ipaddress ON radippool(nasipaddress, pool_key, framedipaddress);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf
deleted file mode 100644
index 9223d01..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf
+++ /dev/null
@@ -1,270 +0,0 @@
-# -*- text -*-
-#
-#  main/mssql/queries.conf -- MSSQL configuration for default schema (schema.sql)
-#
-#  $Id: 6fcea6edb5998f9f6c302f6246a88cdddf83dbaa $
-
-# Safe characters list for sql queries. Everything else is replaced
-# with their mime-encoded equivalents.
-# The default list should be ok
-#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used
-# below everywhere a username substitution is needed so you you can
-# be sure the username passed from the client is escaped properly.
-#
-# Uncomment the next line, if you want the sql_user_name to mean:
-#
-#    Use Stripped-User-Name, if it's there.
-#    Else use User-Name, if it's there,
-#    Else use hard-coded string "none" as the user name.
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
-#
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-#  Authorization Queries
-#######################################################################
-#  These queries compare the check items for the user
-#  in ${authcheck_table} and setup the reply items in
-#  ${authreply_table}.  You can use any query/tables
-#  you want, but the return data for each row MUST
-#  be in the  following order:
-#
-#  0. Row ID (currently unused)
-#  1. UserName/GroupName
-#  2. Item Attr Name
-#  3. Item Attr Value
-#  4. Item Attr Operation
-#######################################################################
-# Query for case sensitive usernames was removed. Please contact with me,
-# if you know analog of STRCMP functions for MS SQL.
-
-authorize_check_query = "\
-	SELECT id, UserName, Attribute, Value, op \
-	FROM ${authcheck_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_reply_query = "\
-	SELECT id, UserName, Attribute, Value, op \
-	FROM ${authreply_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_group_check_query = "\
-	SELECT \
-		${groupcheck_table}.id,${groupcheck_table}.GroupName, \
-		${groupcheck_table}.Attribute,${groupcheck_table}.Value, \
-		${groupcheck_table}.op \
-	FROM ${groupcheck_table},${usergroup_table} \
-	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
-	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
-	ORDER BY ${groupcheck_table}.id"
-
-authorize_group_reply_query = "\
-	SELECT \
-		${groupreply_table}.id, ${groupreply_table}.GroupName, \
-		${groupreply_table}.Attribute,${groupreply_table}.Value, \
-		${groupreply_table}.op \
-	FROM ${groupreply_table},${usergroup_table} \
-	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
-	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
-	ORDER BY ${groupreply_table}.id"
-
-group_membership_query = "\
-	SELECT groupname \
-	FROM ${usergroup_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY priority"
-
-#######################################################################
-# Accounting and Post-Auth Queries
-#######################################################################
-# These queries insert/update accounting and authentication records.
-# The query to use is determined by the value of 'reference'.
-# This value is used as a configuration path and should resolve to one
-# or more 'query's. If reference points to multiple queries, and a query
-# fails, the next query is executed.
-#
-# Behaviour is identical to the old 1.x/2.x module, except we can now
-# fail between N queries, and query selection can be based on any
-# combination of attributes, or custom 'Acct-Status-Type' values.
-#######################################################################
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/accounting.sql
-
-	type {
-		accounting-on {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStopTime='%S', \
-					AcctSessionTime=unix_timestamp('%S') - \
-						unix_timestamp(AcctStartTime), \
-					AcctTerminateCause='%{%{Acct-Terminate-Cause}:-NAS-Reboot}', \
-					AcctStopDelay = %{%{Acct-Delay-Time}:-0} \
-				WHERE AcctStopTime = 0 \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStartTime <= '%S'"
-		}
-
-		accounting-off {
-			query = "${..accounting-on.query}"
-		}
-
-		start {
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(AcctSessionId,		AcctUniqueId,		UserName, \
-					Realm,			NASIPAddress,		NASPort, \
-					NASPortType,		AcctStartTime, 		AcctSessionTime, \
-					AcctAuthentic,		ConnectInfo_start,	ConnectInfo_stop, \
-					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
-					CallingStationId,	AcctTerminateCause,	ServiceType, \
-					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
-					AcctStopDelay,		XAscendSessionSvrKey) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					'%S', \
-					'0', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					'0', \
-					'0', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'%{Acct-Delay-Time}', \
-					'0', \
-					'%{X-Ascend-Session-Svr-Key}')"
-
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStartTime = '%S', \
-					AcctStartDelay = '%{%{Acct-Delay-Time}:-0}', \
-					ConnectInfo_start = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStopTime = 0"
-		}
-
-		interim-update {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					FramedIPAddress = '%{Framed-IP-Address}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress= '%{NAS-IP-Address}' \
-				AND AcctStopTime = 0"
-
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(AcctSessionId,		AcctUniqueId,		UserName, \
-					Realm,			NASIPAddress,		NASPort, \
-					NASPortType,		AcctSessionTime,	AcctAuthentic, \
-					ConnectInfo_start,	AcctInputOctets,	AcctOutputOctets, \
-					CalledStationId,	CallingStationId,	ServiceType, \
-					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
-					XAscendSessionSvrKey) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'', \
-					'%{Acct-Input-Octets}', \
-					'%{Acct-Output-Octets}', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'0', \
-					'%{X-Ascend-Session-Svr-Key}')"
-		}
-
-		stop {
-			query = "\
-				UPDATE ${....acct_table2} \
-				SET \
-					AcctStopTime = '%S', \
-					AcctSessionTime = '%{Acct-Session-Time}', \
-					AcctInputOctets = '%{Acct-Input-Octets}', \
-					AcctOutputOctets = '%{Acct-Output-Octets}', \
-					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
-					AcctStopDelay = '%{%{Acct-Delay-Time}:-0}', \
-					ConnectInfo_stop = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStopTime = 0"
-
-			query = "\
-				INSERT into ${....acct_table2} \
-					(AcctSessionId,		AcctUniqueId,		UserName, \
-					Realm,			NASIPAddress,		NASPort, \
-					NASPortType,		AcctStopTime,		AcctSessionTime, \
-					AcctAuthentic,		ConnectInfo_start,	ConnectInfo_stop, \
-					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
-					CallingStationId,	AcctTerminateCause,	ServiceType, \
-					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
-					AcctStopDelay) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					'%S', \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'', \
-					'%{Connect-Info}', \
-					'%{Acct-Input-Octets}', \
-					'%{Acct-Output-Octets}', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Acct-Terminate-Cause}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'0', \
-					'%{%{Acct-Delay-Time}:-0}')"
-		}
-	}
-}
-
-post-auth {
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/post-auth.sql
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql
deleted file mode 100644
index 7d48628..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql
+++ /dev/null
@@ -1,252 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/***************************************************************************
- * $Id: f89204918fc5951cb1920d5474563656ec9dee98 $		   *
- *									   *
- * db_mssql.sql                 					   *
- *                                                                         *
- * Database schema for MSSQL server					   *
- *									   *
- * To load:								   *
- *  isql -S db_ip_addr -d db_name -U db_login -P db_passwd -i db_mssql.sql *
- *									   *
- * Based on: db_mysql.sql (Mike Machado <mike@innercite.com>)		   *
- *									   *
- *					Dmitri Ageev <d_ageev@ortcc.ru>    *
- ***************************************************************************/
-
-/****** Object:  Table [radacct]    Script Date: 26.03.02 16:55:17 ******/
-CREATE TABLE [radacct] (
-	[RadAcctId] [numeric](21, 0) IDENTITY (1, 1) NOT NULL ,
-	[AcctSessionId] [varchar] (64) DEFAULT ('') FOR [AcctSessionId],
-	[AcctUniqueId] [varchar] (32) DEFAULT ('') FOR [AcctUniqueId],
-	[UserName] [varchar] (64) DEFAULT ('') FOR [UserName],
-	[GroupName] [varchar] (64) DEFAULT ('') FOR [GroupName],
-	[Realm] [varchar] (64) DEFAULT ('') FOR [Realm],
-	[NASIPAddress] [varchar] (15) DEFAULT ('') FOR [NASIPAddress],
-	[NASPortId] [varchar] (15) NULL ,
-	[NASPortType] [varchar] (32) NULL ,
-	[AcctStartTime] [datetime] NOT NULL ,
-	[AcctStopTime] [datetime] NOT NULL ,
-	[AcctSessionTime] [bigint] NULL ,
-	[AcctAuthentic] [varchar] (32) NULL ,
-	[ConnectInfo_start] [varchar] (32) DEFAULT (null) FOR [ConnectInfo_start],
-	[ConnectInfo_stop] [varchar] (32) DEFAULT (null) FOR [ConnectInfo_stop],
-	[AcctInputOctets] [bigint] NULL ,
-	[AcctOutputOctets] [bigint] NULL ,
-	[CalledStationId] [varchar] (30) DEFAULT ('') FOR [CalledStationId],
-	[CallingStationId] [varchar] (30) DEFAULT ('') FOR [CallingStationId],
-	[AcctTerminateCause] [varchar] (32) DEFAULT ('') FOR [AcctTerminateCause],
-	[ServiceType] [varchar] (32) NULL ,
-	[FramedProtocol] [varchar] (32) NULL ,
-	[FramedIPAddress] [varchar] (15) DEFAULT ('') FOR [FramedIPAddress],
-	[XAscendSessionSvrKey] [varchar] (10) DEFAULT (null) FOR [XAscendSessionSvrKey],
-	[AcctStartDelay] [int] NULL ,
-	[AcctStopDelay] [int] NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radcheck]    Script Date: 26.03.02 16:55:17 ******/
-CREATE TABLE [radcheck] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[UserName] [varchar] (64) NOT NULL ,
-	[Attribute] [varchar] (32) NOT NULL ,
-	[Value] [varchar] (253) NOT NULL ,
-	[op] [char] (2) NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radgroupcheck]    Script Date: 26.03.02 16:55:17 ******/
-CREATE TABLE [radgroupcheck] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[GroupName] [varchar] (64) NOT NULL ,
-	[Attribute] [varchar] (32) NOT NULL ,
-	[Value] [varchar] (253) NOT NULL ,
-	[op] [char] (2) NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radgroupreply]    Script Date: 26.03.02 16:55:17 ******/
-CREATE TABLE [radgroupreply] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[GroupName] [varchar] (64) NOT NULL ,
-	[Attribute] [varchar] (32) NOT NULL ,
-	[Value] [varchar] (253) NOT NULL ,
-	[op] [char] (2) NULL ,
-	[prio] [int] NOT NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radreply]    Script Date: 26.03.02 16:55:18 ******/
-CREATE TABLE [radreply] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[UserName] [varchar] (64) NOT NULL ,
-	[Attribute] [varchar] (32) NOT NULL ,
-	[Value] [varchar] (253) NOT NULL ,
-	[op] [char] (2) NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radusergroup]    Script Date: 26.03.02 16:55:18 ******/
-CREATE TABLE [radusergroup] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[UserName] [varchar] (64) NOT NULL ,
-	[GroupName] [varchar] (64) NULL
-) ON [PRIMARY]
-GO
-
-/****** Object:  Table [radusergroup]    Script Date: 16.04.08 19:44:11 ******/
-CREATE TABLE [radpostauth] (
-	[id] [int] IDENTITY (1, 1) NOT NULL ,
-	[userName] [varchar] (64) NOT NULL ,
-	[pass] [varchar] (64) NOT NULL ,
-	[reply] [varchar] (32) NOT NULL ,
-	[authdate] [datetime] NOT NULL
-)
-GO
-
-ALTER TABLE [radacct] WITH NOCHECK ADD
-	CONSTRAINT [DF_radacct_GroupName] DEFAULT ('') FOR [GroupName],
-	CONSTRAINT [DF_radacct_AcctSessionId] DEFAULT ('') FOR [AcctSessionId],
-	CONSTRAINT [DF_radacct_AcctUniqueId] DEFAULT ('') FOR [AcctUniqueId],
-	CONSTRAINT [DF_radacct_UserName] DEFAULT ('') FOR [UserName],
-	CONSTRAINT [DF_radacct_Realm] DEFAULT ('') FOR [Realm],
-	CONSTRAINT [DF_radacct_NASIPAddress] DEFAULT ('') FOR [NASIPAddress],
-	CONSTRAINT [DF_radacct_NASPortId] DEFAULT (null) FOR [NASPortId],
-	CONSTRAINT [DF_radacct_NASPortType] DEFAULT (null) FOR [NASPortType],
-	CONSTRAINT [DF_radacct_AcctStartTime] DEFAULT ('1900-01-01 00:00:00') FOR [AcctStartTime],
-	CONSTRAINT [DF_radacct_AcctStopTime] DEFAULT ('1900-01-01 00:00:00') FOR [AcctStopTime],
-	CONSTRAINT [DF_radacct_AcctSessionTime] DEFAULT (null) FOR [AcctSessionTime],
-	CONSTRAINT [DF_radacct_AcctAuthentic] DEFAULT (null) FOR [AcctAuthentic],
-	CONSTRAINT [DF_radacct_ConnectInfo_start] DEFAULT (null) FOR [ConnectInfo_start],
-	CONSTRAINT [DF_radacct_ConnectInfo_stop] DEFAULT (null) FOR [ConnectInfo_stop],
-	CONSTRAINT [DF_radacct_AcctInputOctets] DEFAULT (null) FOR [AcctInputOctets],
-	CONSTRAINT [DF_radacct_AcctOutputOctets] DEFAULT (null) FOR [AcctOutputOctets],
-	CONSTRAINT [DF_radacct_CalledStationId] DEFAULT ('') FOR [CalledStationId],
-	CONSTRAINT [DF_radacct_CallingStationId] DEFAULT ('') FOR [CallingStationId],
-	CONSTRAINT [DF_radacct_AcctTerminateCause] DEFAULT ('') FOR [AcctTerminateCause],
-	CONSTRAINT [DF_radacct_ServiceType] DEFAULT (null) FOR [ServiceType],
-	CONSTRAINT [DF_radacct_FramedProtocol] DEFAULT (null) FOR [FramedProtocol],
-	CONSTRAINT [DF_radacct_FramedIPAddress] DEFAULT ('') FOR [FramedIPAddress],
-	CONSTRAINT [DF_radacct_AcctStartDelay] DEFAULT (null) FOR [AcctStartDelay],
-	CONSTRAINT [DF_radacct_AcctStopDelay] DEFAULT (null) FOR [AcctStopDelay],
-	CONSTRAINT [PK_radacct] PRIMARY KEY  NONCLUSTERED
-	(
-		[RadAcctId]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radcheck] WITH NOCHECK ADD
-	CONSTRAINT [DF_radcheck_UserName] DEFAULT ('') FOR [UserName],
-	CONSTRAINT [DF_radcheck_Attribute] DEFAULT ('') FOR [Attribute],
-	CONSTRAINT [DF_radcheck_Value] DEFAULT ('') FOR [Value],
-	CONSTRAINT [DF_radcheck_op] DEFAULT (null) FOR [op],
-	CONSTRAINT [PK_radcheck] PRIMARY KEY  NONCLUSTERED
-	(
-		[id]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radgroupcheck] WITH NOCHECK ADD
-	CONSTRAINT [DF_radgroupcheck_GroupName] DEFAULT ('') FOR [GroupName],
-	CONSTRAINT [DF_radgroupcheck_Attribute] DEFAULT ('') FOR [Attribute],
-	CONSTRAINT [DF_radgroupcheck_Value] DEFAULT ('') FOR [Value],
-	CONSTRAINT [DF_radgroupcheck_op] DEFAULT (null) FOR [op],
-	CONSTRAINT [PK_radgroupcheck] PRIMARY KEY  NONCLUSTERED
-	(
-		[id]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radgroupreply] WITH NOCHECK ADD
-	CONSTRAINT [DF_radgroupreply_GroupName] DEFAULT ('') FOR [GroupName],
-	CONSTRAINT [DF_radgroupreply_Attribute] DEFAULT ('') FOR [Attribute],
-	CONSTRAINT [DF_radgroupreply_Value] DEFAULT ('') FOR [Value],
-	CONSTRAINT [DF_radgroupreply_op] DEFAULT (null) FOR [op],
-	CONSTRAINT [DF_radgroupreply_prio] DEFAULT (0) FOR [prio],
-	CONSTRAINT [PK_radgroupreply] PRIMARY KEY  NONCLUSTERED
-	(
-		[id]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radreply] WITH NOCHECK ADD
-	CONSTRAINT [DF_radreply_UserName] DEFAULT ('') FOR [UserName],
-	CONSTRAINT [DF_radreply_Attribute] DEFAULT ('') FOR [Attribute],
-	CONSTRAINT [DF_radreply_Value] DEFAULT ('') FOR [Value],
-	CONSTRAINT [DF_radreply_op] DEFAULT (null) FOR [op],
-	CONSTRAINT [PK_radreply] PRIMARY KEY  NONCLUSTERED
-	(
-		[id]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radusergroup] WITH NOCHECK ADD
-	CONSTRAINT [DF_radusergroup_UserName] DEFAULT ('') FOR [UserName],
-	CONSTRAINT [DF_radusergroup_GroupName] DEFAULT ('') FOR [GroupName],
-	CONSTRAINT [PK_radusergroup] PRIMARY KEY  NONCLUSTERED
-	(
-		[id]
-	)  ON [PRIMARY]
-GO
-
-ALTER TABLE [radpostauth] WITH NOCHECK ADD
-	CONSTRAINT [DF_radpostauth_userName] DEFAULT ('') FOR [userName],
-	CONSTRAINT [DF_radpostauth_pass] DEFAULT ('') FOR [pass],
-	CONSTRAINT [DF_radpostauth_reply] DEFAULT ('') FOR [reply],
-	CONSTRAINT [DF_radpostauth_authdate] DEFAULT (getdate()) FOR [authdate],
-	CONSTRAINT [PK_radpostauth] PRIMARY KEY NONCLUSTERED
-	(
-		[id]
-	) ON [PRIMARY]
-GO
-
- CREATE  INDEX [UserName] ON [radacct]([UserName]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [FramedIPAddress] ON [radacct]([FramedIPAddress]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [AcctSessionId] ON [radacct]([AcctSessionId]) ON [PRIMARY]
-GO
-
- CREATE  UNIQUE INDEX [AcctUniqueId] ON [radacct]([AcctUniqueId]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [AcctStartTime] ON [radacct]([AcctStartTime]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [AcctStopTime] ON [radacct]([AcctStopTime]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [NASIPAddress] ON [radacct]([NASIPAddress]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [UserName] ON [radcheck]([UserName]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [GroupName] ON [radgroupcheck]([GroupName]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [GroupName] ON [radgroupreply]([GroupName]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [UserName] ON [radreply]([UserName]) ON [PRIMARY]
-GO
-
- CREATE  INDEX [UserName] ON [radusergroup]([UserName]) ON [PRIMARY]
-GO
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf
deleted file mode 100644
index 4087cb5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- text -*-
-##
-## wimax.conf -- MySQL configuration for WiMAX keying
-##
-##	$Id: 26942305017c59d4589d0645cfc79405b98b4c6a $
-
-# Safe characters list for sql queries. Everything else is replaced
-# with their mime-encoded equivalents.
-# The default list should be ok
-#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
-# everywhere a username substitution is needed so you you can be sure
-# the username passed from the client is escaped properly.
-#
-#  Uncomment the next line, if you want the sql_user_name to mean:
-#
-#    Use Stripped-User-Name, if it's there.
-#    Else use User-Name, if it's there,
-#    Else use hard-coded string "DEFAULT" as the user name.
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
-#
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-# Logging of WiMAX SPI -> key mappings
-#######################################################################
-# postauth_query		- Insert some info after authentication
-#######################################################################
-
-postauth_query = "INSERT INTO wimax \
-		  (username, authdate, spi, mipkey, lifetime) \
-		  VALUES ( \
-		  '%{User-Name}', '%S' \
-		  '%{%{reply:WiMAX-MN-hHA-MIP4-SPI}:-%{reply:WiMAX-MN-hHA-MIP6-SPI}}', \
-		  '%{%{reply:WiMAX-MN-hHA-MIP4-Key}:-%{reply:WiMAX-MN-hHA-MIP6-Key}}', '%{%{reply:Session-Timeout}:-86400}' )"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql
deleted file mode 100644
index 828a3c0..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# WiMAX Table structure for table 'wimax',
-# which replaces the "radpostauth" table.
-#
-
-CREATE TABLE wimax (
-  id int(11) NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  authdate timestamp NOT NULL,
-  spi varchar(16) NOT NULL default '',
-  mipkey varchar(400) NOT NULL default '',
-  lifetime int(12) default NULL,
-  PRIMARY KEY  (id),
-  KEY username (username),
-  KEY spi (spi)
-) ;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf
deleted file mode 100644
index 60c0f27..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf
+++ /dev/null
@@ -1,414 +0,0 @@
-# -*- text -*-
-#
-#  main/mysql/queries.conf-- MySQL configuration for default schema (schema.sql)
-#
-#  $Id: 0b3c210d6c0b04350d1a48738764b47f25f51bc4 $
-
-# Safe characters list for sql queries. Everything else is replaced
-# with their mime-encoded equivalents.
-# The default list should be ok
-#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-
-#######################################################################
-#  Connection config
-#######################################################################
-# The character set is not configurable. The default character set of
-# the mysql client library is used. To control the character set,
-# create/edit my.cnf (typically in /etc/mysql/my.cnf or /etc/my.cnf)
-# and enter
-# [client]
-# default-character-set = utf8
-#
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used below
-# everywhere a username substitution is needed so you you can be sure
-# the username passed from the client is escaped properly.
-#
-# Uncomment the next line, if you want the sql_user_name to mean:
-#
-#	Use Stripped-User-Name, if it's there.
-#	Else use User-Name, if it's there,
-#	Else use hard-coded string "DEFAULT" as the user name.
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
-#
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-# Default profile
-#######################################################################
-# This is the default profile. It is found in SQL by group membership.
-# That means that this profile must be a member of at least one group
-# which will contain the corresponding check and reply items.
-# This profile will be queried in the authorize section for every user.
-# The point is to assign all users a default profile without having to
-# manually add each one to a group that will contain the profile.
-# The SQL module will also honor the User-Profile attribute. This
-# attribute can be set anywhere in the authorize section (ie the users
-# file). It is found exactly as the default profile is found.
-# If it is set then it will *overwrite* the default profile setting.
-# The idea is to select profiles based on checks on the incoming packets,
-# not on user group membership. For example:
-# -- users file --
-# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
-# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
-#
-# By default the default_user_profile is not set
-#
-#default_user_profile = "DEFAULT"
-
-#######################################################################
-# NAS Query
-#######################################################################
-# This query retrieves the radius clients
-#
-# 0. Row ID (currently unused)
-# 1. Name (or IP address)
-# 2. Shortname
-# 3. Type
-# 4. Secret
-# 5. Server
-#######################################################################
-
-client_query = "\
-	SELECT id, nasname, shortname, type, secret, server \
-	FROM ${client_table}"
-
-#######################################################################
-# Authorization Queries
-#######################################################################
-# These queries compare the check items for the user
-# in ${authcheck_table} and setup the reply items in
-# ${authreply_table}. You can use any query/tables
-# you want, but the return data for each row MUST
-# be in the following order:
-#
-# 0. Row ID (currently unused)
-# 1. UserName/GroupName
-# 2. Item Attr Name
-# 3. Item Attr Value
-# 4. Item Attr Operation
-#######################################################################
-# Use these for case sensitive usernames.
-
-#authorize_check_query = "\
-#	SELECT id, username, attribute, value, op \
-#	FROM ${authcheck_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY id"
-
-#authorize_reply_query = "\
-#	SELECT id, username, attribute, value, op \
-#	FROM ${authreply_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY id"
-
-#
-#  The default queries are case insensitive. (for compatibility with
-#  older versions of FreeRADIUS)
-#
-authorize_check_query = "\
-	SELECT id, username, attribute, value, op \
-	FROM ${authcheck_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_reply_query = "\
-	SELECT id, username, attribute, value, op \
-	FROM ${authreply_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-#
-#  Use these for case sensitive usernames.
-#
-group_membership_query = "\
-#	SELECT groupname \
-#	FROM ${usergroup_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY priority"
-
-group_membership_query = "\
-	SELECT groupname \
-	FROM ${usergroup_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY priority"
-
-authorize_group_check_query = "\
-	SELECT id, groupname, attribute, \
-	Value, op \
-	FROM ${groupcheck_table} \
-	WHERE groupname = '%{Sql-Group}' \
-	ORDER BY id"
-
-authorize_group_reply_query = "\
-	SELECT id, groupname, attribute, \
-	value, op \
-	FROM ${groupreply_table} \
-	WHERE groupname = '%{Sql-Group}' \
-	ORDER BY id"
-
-#######################################################################
-# Simultaneous Use Checking Queries
-#######################################################################
-# simul_count_query	- query for the number of current connections
-#			- If this is not defined, no simultaneouls use checking
-#			- will be performed by this module instance
-# simul_verify_query	- query to return details of current connections
-#				for verification
-#			- Leave blank or commented out to disable verification step
-#			- Note that the returned field order should not be changed.
-#######################################################################
-
-#
-#  Uncomment simul_count_query to enable simultaneous use checking
-#
-#simul_count_query = "\
-#	SELECT COUNT(*) \
-#	FROM ${acct_table1} \
-#	WHERE username = '%{SQL-User-Name}' \
-#	AND acctstoptime IS NULL"
-
-simul_verify_query = "\
-	SELECT \
-		radacctid, acctsessionid, username, nasipaddress, nasportid, framedipaddress, \
-		callingstationid, framedprotocol \
-	FROM ${acct_table1} \
-	WHERE username = '%{SQL-User-Name}' \
-	AND acctstoptime IS NULL"
-
-#######################################################################
-# Accounting and Post-Auth Queries
-#######################################################################
-# These queries insert/update accounting and authentication records.
-# The query to use is determined by the value of 'reference'.
-# This value is used as a configuration path and should resolve to one
-# or more 'query's. If reference points to multiple queries, and a query
-# fails, the next query is executed.
-#
-# Behaviour is identical to the old 1.x/2.x module, except we can now
-# fail between N queries, and query selection can be based on any
-# combination of attributes, or custom 'Acct-Status-Type' values.
-#######################################################################
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/accounting.sql
-
-	column_list = "\
-		acctsessionid,		acctuniqueid,		username, \
-		realm,			nasipaddress,		nasportid, \
-		nasporttype,		acctstarttime,		acctupdatetime, \
-		acctstoptime,		acctsessiontime, 	acctauthentic, \
-		connectinfo_start,	connectinfo_stop, 	acctinputoctets, \
-		acctoutputoctets,	calledstationid, 	callingstationid, \
-		acctterminatecause,	servicetype,		framedprotocol, \
-		framedipaddress"
-
-	type {
-		accounting-on {
-			#
-			#  Bulk terminate all sessions associated with a given NAS
-			#
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					acctstoptime = FROM_UNIXTIME(\
-						%{integer:Event-Timestamp}), \
-					acctsessiontime	= '%{integer:Event-Timestamp}' \
-						- UNIX_TIMESTAMP(acctstarttime), \
-					acctterminatecause = '%{%{Acct-Terminate-Cause}:-NAS-Reboot}' \
-				WHERE acctstoptime IS NULL \
-				AND nasipaddress   = '%{NAS-IP-Address}' \
-				AND acctstarttime <= FROM_UNIXTIME(\
-					%{integer:Event-Timestamp})"
-		}
-
-		accounting-off {
-			query = "${..accounting-on.query}"
-		}
-
-		start {
-			#
-			#  Insert a new record into the sessions table
-			#
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					NULL, \
-					'0', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					'0', \
-					'0', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-
-			#
-			#  Key constraints prevented us from inserting a new session,
-			#  use the alternate query to update an existing session.
-			#
-			query = "\
-				UPDATE ${....acct_table1} SET \
-					acctstarttime	= FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					acctupdatetime	= FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					connectinfo_start = '%{Connect-Info}' \
-				WHERE acctsessionid = '%{Acct-Session-Id}' \
-				AND username		= '%{SQL-User-Name}' \
-				AND nasipaddress	= '%{NAS-IP-Address}'"
-		}
-
-		interim-update {
-			#
-			#  Update an existing session and calculate the interval
-			#  between the last data we received for the session and this
-			#  update. This can be used to find stale sessions.
-			#
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					acctupdatetime  = (@acctupdatetime_old:=acctupdatetime), \
-					acctupdatetime  = FROM_UNIXTIME(\
-						%{integer:Event-Timestamp}), \
-					acctinterval    = %{integer:Event-Timestamp} - \
-						UNIX_TIMESTAMP(@acctupdatetime_old), \
-					framedipaddress = '%{Framed-IP-Address}', \
-					acctsessiontime = '%{Acct-Session-Time}', \
-					acctinputoctets = '%{%{Acct-Input-Gigawords}:-0}' \
-						<< 32 | '%{%{Acct-Input-Octets}:-0}', \
-					acctoutputoctets = '%{%{Acct-Output-Gigawords}:-0}' \
-						<< 32 | '%{%{Acct-Output-Octets}:-0}' \
-				WHERE acctsessionid     = '%{Acct-Session-Id}' \
-				AND username            = '%{SQL-User-Name}' \
-				AND nasipaddress        = '%{NAS-IP-Address}'"
-
-			#
-			#  The update condition matched no existing sessions. Use
-			#  the values provided in the update to create a new session.
-			#
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					FROM_UNIXTIME(%{integer:Event-Timestamp} - \
-						%{%{Acct-Session-Time}:-0}), \
-					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					NULL, \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					'%{%{Acct-Input-Gigawords}:-0}' << 32 | \
-						'%{%{Acct-Input-Octets}:-0}', \
-					'%{%{Acct-Output-Gigawords}:-0}' << 32 | \
-						'%{%{Acct-Output-Octets}:-0}', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-		}
-
-		stop {
-			#
-			#  Session has terminated, update the stop time and statistics.
-			#
-			query = "\
-				UPDATE ${....acct_table2} SET \
-					acctstoptime	= FROM_UNIXTIME(\
-						%{integer:Event-Timestamp}), \
-					acctsessiontime	= '%{Acct-Session-Time}', \
-					acctinputoctets	= '%{%{Acct-Input-Gigawords}:-0}' \
-						<< 32 | '%{%{Acct-Input-Octets}:-0}', \
-					acctoutputoctets = '%{%{Acct-Output-Gigawords}:-0}' \
-						<< 32 | '%{%{Acct-Output-Octets}:-0}', \
-					acctterminatecause = '%{Acct-Terminate-Cause}', \
-					connectinfo_stop = '%{Connect-Info}' \
-				WHERE acctsessionid 	= '%{Acct-Session-Id}' \
-				AND username		= '%{SQL-User-Name}' \
-				AND nasipaddress	= '%{NAS-IP-Address}'"
-
-			#
-			#  The update condition matched no existing sessions. Use
-			#  the values provided in the update to create a new session.
-			#
-			query = "\
-				INSERT INTO ${....acct_table2} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					FROM_UNIXTIME(%{integer:Event-Timestamp} - \
-						%{%{Acct-Session-Time}:-0}), \
-					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', '', \
-					'%{Connect-Info}', \
-					'%{%{Acct-Input-Gigawords}:-0}' << 32 | \
-						'%{%{Acct-Input-Octets}:-0}', \
-					'%{%{Acct-Output-Gigawords}:-0}' << 32 | \
-						'%{%{Acct-Output-Octets}:-0}', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Acct-Terminate-Cause}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-		}
-	}
-}
-
-#######################################################################
-# Authentication Logging Queries
-#######################################################################
-# postauth_query	- Insert some info after authentication
-#######################################################################
-
-post-auth {
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/post-auth.sql
-
-	query =	"\
-		INSERT INTO ${..postauth_table} \
-			(username, pass, reply, authdate) \
-		VALUES ( \
-			'%{SQL-User-Name}', \
-			'%{%{User-Password}:-%{Chap-Password}}', \
-			'%{reply:Packet-Type}', \
-			'%S')"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql
deleted file mode 100644
index ed53201..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql
+++ /dev/null
@@ -1,166 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-###########################################################################
-# $Id: c5185bee856646733a6bd9b341109cde0688b8f1 $                 #
-#                                                                         #
-#  schema.sql                       rlm_sql - FreeRADIUS SQL Module       #
-#                                                                         #
-#     Database schema for MySQL rlm_sql module                            #
-#                                                                         #
-#     To load:                                                            #
-#         mysql -uroot -prootpass radius < schema.sql                     #
-#                                                                         #
-#                                   Mike Machado <mike@innercite.com>     #
-###########################################################################
-#
-# Table structure for table 'radacct'
-#
-
-CREATE TABLE radacct (
-  radacctid bigint(21) NOT NULL auto_increment,
-  acctsessionid varchar(64) NOT NULL default '',
-  acctuniqueid varchar(32) NOT NULL default '',
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  realm varchar(64) default '',
-  nasipaddress varchar(15) NOT NULL default '',
-  nasportid varchar(15) default NULL,
-  nasporttype varchar(32) default NULL,
-  acctstarttime datetime NULL default NULL,
-  acctupdatetime datetime NULL default NULL,
-  acctstoptime datetime NULL default NULL,
-  acctinterval int(12) default NULL,
-  acctsessiontime int(12) unsigned default NULL,
-  acctauthentic varchar(32) default NULL,
-  connectinfo_start varchar(50) default NULL,
-  connectinfo_stop varchar(50) default NULL,
-  acctinputoctets bigint(20) default NULL,
-  acctoutputoctets bigint(20) default NULL,
-  calledstationid varchar(50) NOT NULL default '',
-  callingstationid varchar(50) NOT NULL default '',
-  acctterminatecause varchar(32) NOT NULL default '',
-  servicetype varchar(32) default NULL,
-  framedprotocol varchar(32) default NULL,
-  framedipaddress varchar(15) NOT NULL default '',
-  PRIMARY KEY (radacctid),
-  UNIQUE KEY acctuniqueid (acctuniqueid),
-  KEY username (username),
-  KEY framedipaddress (framedipaddress),
-  KEY acctsessionid (acctsessionid),
-  KEY acctsessiontime (acctsessiontime),
-  KEY acctstarttime (acctstarttime),
-  KEY acctinterval (acctinterval),
-  KEY acctstoptime (acctstoptime),
-  KEY nasipaddress (nasipaddress)
-) ENGINE = INNODB;
-
-#
-# Table structure for table 'radcheck'
-#
-
-CREATE TABLE radcheck (
-  id int(11) unsigned NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253) NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY username (username(32))
-);
-
-#
-# Table structure for table 'radgroupcheck'
-#
-
-CREATE TABLE radgroupcheck (
-  id int(11) unsigned NOT NULL auto_increment,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253)  NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY groupname (groupname(32))
-);
-
-#
-# Table structure for table 'radgroupreply'
-#
-
-CREATE TABLE radgroupreply (
-  id int(11) unsigned NOT NULL auto_increment,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253)  NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY groupname (groupname(32))
-);
-
-#
-# Table structure for table 'radreply'
-#
-
-CREATE TABLE radreply (
-  id int(11) unsigned NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64) NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253) NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY username (username(32))
-);
-
-
-#
-# Table structure for table 'radusergroup'
-#
-
-CREATE TABLE radusergroup (
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  priority int(11) NOT NULL default '1',
-  KEY username (username(32))
-);
-
-#
-# Table structure for table 'radpostauth'
-#
-CREATE TABLE radpostauth (
-  id int(11) NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  pass varchar(64) NOT NULL default '',
-  reply varchar(32) NOT NULL default '',
-  authdate timestamp NOT NULL,
-  PRIMARY KEY  (id)
-) ENGINE = INNODB;
-
-#
-# Table structure for table 'nas'
-#
-CREATE TABLE nas (
-  id int(10) NOT NULL auto_increment,
-  nasname varchar(128) NOT NULL,
-  shortname varchar(32),
-  type varchar(30) DEFAULT 'other',
-  ports int(5),
-  secret varchar(60) DEFAULT 'secret' NOT NULL,
-  server varchar(64),
-  community varchar(50),
-  description varchar(200) DEFAULT 'RADIUS Client',
-  PRIMARY KEY (id),
-  KEY nasname (nasname)
-);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql
deleted file mode 100644
index 2133f45..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# -*- text -*-
-##
-## admin.sql -- MySQL commands for creating the RADIUS user.
-##
-##	WARNING: You should change 'localhost' and 'radpass'
-##		 to something else.  Also update raddb/sql.conf
-##		 with the new RADIUS password.
-##
-##	$Id: aff0505a473c67b65cfc19fae079454a36d4e119 $
-
-#
-#  Create default administrator for RADIUS
-#
-CREATE USER 'radius'@'localhost';
-SET PASSWORD FOR 'radius'@'localhost' = PASSWORD('radpass');
-
-# The server can read any table in SQL
-GRANT SELECT ON radius.* TO 'radius'@'localhost';
-
-# The server can write to the accounting and post-auth logging table.
-#
-#  i.e.
-GRANT ALL on radius.radacct TO 'radius'@'localhost';
-GRANT ALL on radius.radpostauth TO 'radius'@'localhost';
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README
deleted file mode 100644
index 71f5aa3..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README
+++ /dev/null
@@ -1,5 +0,0 @@
-  The SQL schema and 'create admin user" scripts are here in order to
-simplify the process of using MySQL cluster.
-
-  The queries are NOT located here, because the database driver for
-MySQL cluster is just "mysql", and not "ndb".
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql
deleted file mode 100644
index 390ab94..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql
+++ /dev/null
@@ -1,151 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-###########################################################################
-# $Id: a7f4c3121ded2b6557294de8bcab832c5715d038 $                 #
-#                                                                         #
-#  schema.sql                       rlm_sql - FreeRADIUS SQL Module       #
-#                                                                         #
-#     Database schema for MySQL Cluster.				  #
-#     The only difference between this file and ../mysql/schema.sql       #
-#     is the definition of the storage engine.                            #
-#                                                                         #
-#     To load:                                                            #
-#         mysql -uroot -prootpass radius < schema.sql                     #
-#                                                                         #
-#                                   Mike Machado <mike@innercite.com>     #
-###########################################################################
-#
-# Table structure for table 'radacct'
-#
-
-CREATE TABLE radacct (
-  radacctid bigint(21) NOT NULL auto_increment,
-  acctsessionid varchar(64) NOT NULL default '',
-  acctuniqueid varchar(32) NOT NULL default '',
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  realm varchar(64) default '',
-  nasipaddress varchar(15) NOT NULL default '',
-  nasportid varchar(15) default NULL,
-  nasporttype varchar(32) default NULL,
-  acctstarttime datetime NULL default NULL,
-  acctupdatetime datetime NULL default NULL,
-  acctstoptime datetime NULL default NULL,
-  acctinterval int(12) default NULL,
-  acctsessiontime int(12) default NULL,
-  acctauthentic varchar(32) default NULL,
-  connectinfo_start varchar(50) default NULL,
-  connectinfo_stop varchar(50) default NULL,
-  acctinputoctets bigint(20) default NULL,
-  acctoutputoctets bigint(20) default NULL,
-  calledstationid varchar(50) NOT NULL default '',
-  callingstationid varchar(50) NOT NULL default '',
-  acctterminatecause varchar(32) NOT NULL default '',
-  servicetype varchar(32) default NULL,
-  framedprotocol varchar(32) default NULL,
-  framedipaddress varchar(15) NOT NULL default ''
-  PRIMARY KEY  (radacctid),
-  UNIQUE KEY acctuniqueid (acctuniqueid),
-  KEY username (username),
-  KEY framedipaddress (framedipaddress),
-  KEY acctsessionid (acctsessionid),
-  KEY acctsessiontime (acctsessiontime),
-  KEY acctstarttime (acctstarttime),
-  KEY acctstoptime (acctstoptime),
-  KEY nasipaddress (nasipaddress)
-) ENGINE=ndbcluster;
-
-#
-# Table structure for table 'radcheck'
-#
-
-CREATE TABLE radcheck (
-  id int(11) unsigned NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253) NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY username (username(32))
-) ENGINE=ndbcluster;
-
-#
-# Table structure for table 'radgroupcheck'
-#
-
-CREATE TABLE radgroupcheck (
-  id int(11) unsigned NOT NULL auto_increment,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253)  NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY groupname (groupname(32))
-) ENGINE=ndbcluster;
-
-#
-# Table structure for table 'radgroupreply'
-#
-
-CREATE TABLE radgroupreply (
-  id int(11) unsigned NOT NULL auto_increment,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253)  NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY groupname (groupname(32))
-) ENGINE=ndbcluster;
-
-#
-# Table structure for table 'radreply'
-#
-
-CREATE TABLE radreply (
-  id int(11) unsigned NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64) NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253) NOT NULL default '',
-  PRIMARY KEY  (id),
-  KEY username (username(32))
-) ENGINE=ndbcluster;
-
-
-#
-# Table structure for table 'radusergroup'
-#
-
-CREATE TABLE radusergroup (
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  priority int(11) NOT NULL default '1',
-  KEY username (username(32))
-) ENGINE=ndbcluster;
-
-#
-# Table structure for table 'radpostauth'
-#
-
-CREATE TABLE radpostauth (
-  id int(11) NOT NULL auto_increment,
-  username varchar(64) NOT NULL default '',
-  pass varchar(64) NOT NULL default '',
-  reply varchar(32) NOT NULL default '',
-  authdate timestamp NOT NULL,
-  PRIMARY KEY  (id)
-) ENGINE=ndbcluster;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql
deleted file mode 100644
index f7fe41a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql
+++ /dev/null
@@ -1,41 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# -*- text -*-
-##
-## admin.sql -- MySQL commands for creating the RADIUS user.
-##
-##	WARNING: You should change 'localhost' and 'radpass'
-##		 to something else.  Also update raddb/sql.conf
-##		 with the new RADIUS password.
-##
-##	$Id: 5c91384c0991ea9614b7c798a1ab4c89ca227115 $
-
-#
-#  Create default administrator for RADIUS
-#
-CREATE USER 'radius'@'localhost';
-SET PASSWORD FOR 'radius'@'localhost' = PASSWORD('radpass');
-
-# The server can read any table in SQL
-GRANT ALL ON radius.* TO 'radius'@'localhost' identified by 'radpass';
-GRANT ALL ON radius.* TO 'radius'@'radsrvr' identified by 'radpass';
-
-# The server can write to the accounting and post-auth logging table.
-#
-#  i.e.
-#GRANT ALL on radius.radacct TO 'radius'@'localhost' identified by 'radpass';
-#GRANT ALL on radius.radacct TO 'radius'@'radsrvr' identified by 'radpass';
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf
deleted file mode 100644
index c062b66..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf
+++ /dev/null
@@ -1,382 +0,0 @@
-# -*- text -*-
-#
-#  main/oracle/queries.conf -- Oracle configuration for default schema (schema.sql)
-#
-#  $Id: ca22f5f5c9bf5dff47e60fb2bed56d6b161a4d08 $
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
-# everywhere a username substitution is needed so you you can be sure
-# the username passed from the client is escaped properly.
-#
-#  Uncomment the next line, if you want the sql_user_name to mean:
-#
-#    Use Stripped-User-Name, if it's there.
-#    Else use User-Name, if it's there,
-#    Else use hard-coded string "DEFAULT" as the user name.
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
-#
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-#  Default profile
-#######################################################################
-# This is the default profile. It is found in SQL by group membership.
-# That means that this profile must be a member of at least one group
-# which will contain the corresponding check and reply items.
-# This profile will be queried in the authorize section for every user.
-# The point is to assign all users a default profile without having to
-# manually add each one to a group that will contain the profile.
-# The SQL module will also honor the User-Profile attribute. This
-# attribute can be set anywhere in the authorize section (ie the users
-# file). It is found exactly as the default profile is found.
-# If it is set then it will *overwrite* the default profile setting.
-# The idea is to select profiles based on checks on the incoming packets,
-# not on user group membership. For example:
-# -- users file --
-# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
-# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
-#
-# By default the default_user_profile is not set
-#
-#default_user_profile = "DEFAULT"
-#
-# Determines if we will query the default_user_profile or the User-Profile
-# if the user is not found. If the profile is found then we consider the user
-# found. By default this is set to 'no'.
-#
-#query_on_not_found = no
-
-
-#######################################################################
-#  NAS Query
-#######################################################################
-#  This query retrieves the radius clients
-#
-#  0. Row ID (currently unused)
-#  1. Name (or IP address)
-#  2. Shortname
-#  3. Type
-#  4. Secret
-#  5. Virtual server
-#######################################################################
-
-client_query = "\
-	SELECT id, nasname, shortname, type, secret, server \
-	FROM ${client_table}"
-
-#######################################################################
-#  Authorization Queries
-#######################################################################
-#  These queries compare the check items for the user
-#  in ${authcheck_table} and setup the reply items in
-#  ${authreply_table}.  You can use any query/tables
-#  you want, but the return data for each row MUST
-#  be in the  following order:
-#
-#  0. Row ID (currently unused)
-#  1. UserName/GroupName
-#  2. Item Attr Name
-#  3. Item Attr Value
-#  4. Item Attr Operation
-#######################################################################
-#
-# WARNING: Oracle is case sensitive
-#
-# The main difference between MySQL and Oracle queries is the date format.
-# You must use the TO_DATE function to transform the radius date format to
-# the Oracle date format, and put NULL otherwise '0' in a void date field.
-#
-#######################################################################
-
-authorize_check_query = "\
-	SELECT id, UserName, Attribute, Value, op \
-	FROM ${authcheck_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_reply_query = "\
-	SELECT id, UserName, Attribute, Value, op \
-	FROM ${authreply_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_group_check_query = "\
-	SELECT \
-		${groupcheck_table}.id, ${groupcheck_table}.GroupName, ${groupcheck_table}.Attribute, \
-		${groupcheck_table}.Value,${groupcheck_table}.op \
-	FROM ${groupcheck_table}, ${usergroup_table} \
-	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
-	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
-	ORDER BY ${groupcheck_table}.id"
-
-authorize_group_reply_query = "\
-	SELECT \
-		${groupreply_table}.id, ${groupreply_table}.GroupName, ${groupreply_table}.Attribute, \
-		${groupreply_table}.Value, ${groupreply_table}.op \
-	FROM ${groupreply_table}, ${usergroup_table} \
-	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
-	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
-	ORDER BY ${groupreply_table}.id"
-
-#######################################################################
-# Simultaneous Use Checking Queries
-#######################################################################
-# simul_count_query	- query for the number of current connections
-#			- If this is not defined, no simultaneouls use checking
-#			- will be performed by this module instance
-# simul_verify_query	- query to return details of current connections for verification
-#			- Leave blank or commented out to disable verification step
-#			- Note that the returned field order should not be changed.
-#######################################################################
-
-#
-#  Uncomment simul_count_query to enable simultaneous use checking
-#
-#simul_count_query = "\
-#	SELECT COUNT(*) \
-#	FROM ${acct_table1} \
-#	WHERE UserName = '%{SQL-User-Name}' \
-#	AND AcctStopTime IS NULL"
-
-simul_verify_query = "\
-	SELECT \
-		RadAcctId, AcctSessionId, UserName, NASIPAddress, NASPortId, \
-		FramedIPAddress, CallingStationId, FramedProtocol \
-	FROM ${acct_table1} \
-	WHERE UserName='%{SQL-User-Name}' \
-	AND AcctStopTime IS NULL"
-
-#######################################################################
-# Group Membership Queries
-#######################################################################
-# group_membership_query	- Check user group membership
-#######################################################################
-
-group_membership_query = "\
-	SELECT GroupName \
-	FROM ${usergroup_table} \
-	WHERE UserName='%{SQL-User-Name}'"
-
-#######################################################################
-# Accounting and Post-Auth Queries
-#######################################################################
-# These queries insert/update accounting and authentication records.
-# The query to use is determined by the value of 'reference'.
-# This value is used as a configuration path and should resolve to one
-# or more 'query's. If reference points to multiple queries, and a query
-# fails, the next query is executed.
-#
-# Behaviour is identical to the old 1.x/2.x module, except we can now
-# fail between N queries, and query selection can be based on any
-# combination of attributes, or custom 'Acct-Status-Type' values.
-#######################################################################
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#		logfile = ${logdir}/accounting.sql
-
-	type {
-		accounting-on {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStopTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
-					AcctSessionTime = round((TO_DATE('%S','yyyy-mm-dd hh24:mi:ss') - \
-						TO_DATE(TO_CHAR(acctstarttime, 'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss'))*86400), \
-					AcctTerminateCause='%{%{Acct-Terminate-Cause}:-NAS-Reboot}', \
-					AcctStopDelay = %{%{Acct-Delay-Time}:-0} \
-				WHERE AcctStopTime IS NULL \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStartTime <= TO_DATE('%S','yyyy-mm-dd hh24:mi:ss')"
-		}
-
-		accounting-off {
-			query = "${..accounting-on.query}"
-		}
-
-		start {
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
-					UserName,		Realm,			NASIPAddress, \
-					NASPortId,		NASPortType,		AcctStartTime, \
-					AcctStopTime,		AcctSessionTime,	AcctAuthentic, \
-					ConnectInfo_start,	ConnectInfo_stop,	AcctInputOctets, \
-					AcctOutputOctets,	CalledStationId,	CallingStationId, \
-					AcctTerminateCause,	ServiceType,		FramedProtocol, \
-					FramedIPAddress,	AcctStartDelay,		AcctStopDelay, \
-					XAscendSessionSvrKey) \
-				VALUES(\
-					'', \
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
-					NULL, \
-					'0', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					'0', \
-					'0', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'%{Acct-Delay-Time}', \
-					'0', \
-					'%{X-Ascend-Session-Svr-Key}')"
-
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStartTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
-					AcctStartDelay = '%{%{Acct-Delay-Time}:-0}', \
-					ConnectInfo_start = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStopTime IS NULL"
-		}
-
-		interim-update {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					FramedIPAddress = NULLIF('%{Framed-IP-Address}', ''), \
-					AcctSessionTime = '%{Acct-Session-Time}', \
-					AcctInputOctets = '%{Acct-Input-Octets}' + \
-						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
-					AcctOutputOctets = '%{Acct-Output-Octets}' +  \
-						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296) \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress= '%{NAS-IP-Address}' \
-				AND AcctStopTime IS NULL"
-
-			query = "\
-				INSERT into ${....acct_table1} \
-					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
-					UserName,		Realm,			NASIPAddress, \
-					NASPortId,		NASPortType,		AcctStartTime, \
-					AcctSessionTime, 	AcctAuthentic,		ConnectInfo_start, \
-					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
-					CallingStationId,	ServiceType,		FramedProtocol, \
-					FramedIPAddress,	AcctStartDelay,		XAscendSessionSvrKey) \
-				VALUES(\
-					'', \
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					NULL, \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'', \
-					'%{Acct-Input-Octets}' + \
-						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
-					'%{Acct-Output-Octets}' +  \
-						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'0', \
-					'%{X-Ascend-Session-Svr-Key}')"
-		}
-
-		stop {
-			query = "\
-				UPDATE ${....acct_table2} \
-				SET \
-					AcctStopTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
-					AcctSessionTime = '%{Acct-Session-Time}', \
-					AcctInputOctets = '%{Acct-Input-Octets}' + \
-						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
-					AcctOutputOctets = '%{Acct-Output-Octets}' +  \
-						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
-					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
-					AcctStopDelay = '%{%{Acct-Delay-Time}:-0}', \
-					ConnectInfo_stop = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{NAS-IP-Address}' \
-				AND AcctStopTime IS NULL"
-
-			query = "\
-				INSERT into ${....acct_table2} \
-					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
-					 UserName,		Realm,			NASIPAddress, \
-					 NASPortId,		NASPortType,		AcctStartTime, \
-					 AcctStopTime,		AcctSessionTime,	AcctAuthentic, \
-					 ConnectInfo_start,	ConnectInfo_stop,	AcctInputOctets, \
-					 AcctOutputOctets,	CalledStationId,	CallingStationId, \
-					 AcctTerminateCause,	ServiceType,		FramedProtocol, \
-					 FramedIPAddress,	AcctStartDelay,		AcctStopDelay) \
-				VALUES(\
-					'', \
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port-Id}', \
-					'%{NAS-Port-Type}', \
-					NULL, \
-					TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'', \
-					'%{Connect-Info}', \
-					'%{Acct-Input-Octets}' + \
-						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
-					'%{Acct-Output-Octets}' + \
-						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Acct-Terminate-Cause}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}', \
-					'0', \
-					'%{%{Acct-Delay-Time}:-0}')"
-
-		}
-	}
-}
-
-#######################################################################
-# Authentication Logging Queries
-#######################################################################
-# postauth_query                - Insert some info after authentication
-#######################################################################
-
-post-auth {
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/post-auth.sql
-	query = "\
-		INSERT INTO ${..postauth_table} \
-			(username, pass, reply, authdate) \
-		VALUES (\
-			'%{User-Name}', \
-			'%{%{User-Password}:-%{Chap-Password}}', \
-			'%{reply:Packet-Type}', \
-			TO_TIMESTAMP('%S','YYYY-MM-DDHH24:MI:SS'))"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql
deleted file mode 100644
index d929aac..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql
+++ /dev/null
@@ -1,246 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/*
- * $Id: c11295fa7307a7c05a586f5354dd59de32c059de $
- *
- * Oracle schema for FreeRADIUS
- *
- *
- * NOTE: Which columns are NULLable??
- */
-
-/*
- * Table structure for table 'radacct'
- */
-CREATE TABLE radacct (
-	radacctid		INT PRIMARY KEY,
-	acctsessionid		VARCHAR(96) NOT NULL,
-	acctuniqueid		VARCHAR(32),
-	username		VARCHAR(64) NOT NULL,
-	groupname		VARCHAR(32),
-	realm			VARCHAR(30),
-	nasipaddress		VARCHAR(15) NOT NULL,
-	nasportid		VARCHAR(32),
-	nasporttype		VARCHAR(32),
-	acctstarttime		TIMESTAMP WITH TIME ZONE,
-	acctstoptime		TIMESTAMP WITH TIME ZONE,
-	acctsessiontime		NUMERIC(19),
-	acctauthentic		VARCHAR(32),
-	connectinfo_start	VARCHAR(50),
-	connectinfo_stop	VARCHAR(50),
-	acctinputoctets		NUMERIC(19),
-	acctoutputoctets	NUMERIC(19),
-	calledstationid		VARCHAR(50),
-	callingstationid	VARCHAR(50),
-	acctterminatecause	VARCHAR(32),
-	servicetype		VARCHAR(32),
-	framedprotocol		VARCHAR(32),
-	framedipaddress		VARCHAR(15),
-	acctstartdelay		NUMERIC(12),
-	acctstopdelay		NUMERIC(12),
-	XAscendSessionSvrKey	VARCHAR(10)
-);
-
-CREATE UNIUQE INDEX radacct_idx0
-	ON radacct(acctuniqueid);
-CREATE UNIQUE INDEX radacct_idx1
-	ON radacct(acctsessionid,username,acctstarttime,
-		acctstoptime,nasipaddress,framedipaddress);
-
-CREATE SEQUENCE radacct_seq START WITH 1 INCREMENT BY 1;
-
-/* Trigger to emulate a serial # on the primary key */
-CREATE OR REPLACE TRIGGER radacct_serialnumber
-	BEFORE INSERT OR UPDATE OF radacctid ON radacct
-	FOR EACH ROW
-	BEGIN
-		if ( :new.radacctid = 0 or :new.radacctid is null ) then
-			SELECT radacct_seq.nextval into :new.radacctid from dual;
-		end if;
-	END;
-/
-
-/*
- * Table structure for table 'radcheck'
- */
-CREATE TABLE radcheck (
-	id 		INT PRIMARY KEY,
-	username	VARCHAR(30) NOT NULL,
-	attribute	VARCHAR(64),
-	op		VARCHAR(2) NOT NULL,
-	value		VARCHAR(40)
-);
-CREATE SEQUENCE radcheck_seq START WITH 1 INCREMENT BY 1;
-
-/* Trigger to emulate a serial # on the primary key */
-CREATE OR REPLACE TRIGGER radcheck_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radcheck
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radcheck_seq.nextval into :new.id from dual;
-		end if;
-	END;
-/
-
-/*
- * Table structure for table 'radgroupcheck'
- */
-CREATE TABLE radgroupcheck (
-	id 		INT PRIMARY KEY,
-	groupname	VARCHAR(20) UNIQUE NOT NULL,
-	attribute	VARCHAR(64),
-	op		CHAR(2) NOT NULL,
-	value		VARCHAR(40)
-);
-CREATE SEQUENCE radgroupcheck_seq START WITH 1 INCREMENT BY 1;
-
-/*
- * Table structure for table 'radgroupreply'
- */
-CREATE TABLE radgroupreply (
-	id		INT PRIMARY KEY,
-	GroupName	VARCHAR(20) UNIQUE NOT NULL,
-	Attribute	VARCHAR(64),
-	op		CHAR(2) NOT NULL,
-	Value		VARCHAR(40)
-);
-CREATE SEQUENCE radgroupreply_seq START WITH 1 INCREMENT BY 1;
-
-/*
- * Table structure for table 'radreply'
- */
-CREATE TABLE radreply (
-	id		INT PRIMARY KEY,
-	UserName	VARCHAR(30) NOT NULL,
-	Attribute	VARCHAR(64),
-	op		CHAR(2) NOT NULL,
-	Value		VARCHAR(40)
-);
-CREATE INDEX radreply_idx1 ON radreply(UserName);
-CREATE SEQUENCE radreply_seq START WITH 1 INCREMENT BY 1;
-
-/* Trigger to emulate a serial # on the primary key */
-CREATE OR REPLACE TRIGGER radreply_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radreply
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radreply_seq.nextval into :new.id from dual;
-		end if;
-	END;
-/
-
-/*
- * Table structure for table 'radusergroup'
- */
-CREATE TABLE radusergroup (
-	id		INT PRIMARY KEY,
-	UserName	VARCHAR(30) UNIQUE NOT NULL,
-	GroupName	VARCHAR(30)
-);
-CREATE SEQUENCE radusergroup_seq START WITH 1 INCREMENT BY 1;
-
-/* Trigger to emulate a serial # on the primary key */
-CREATE OR REPLACE TRIGGER radusergroup_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radusergroup
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radusergroup_seq.nextval into :new.id from dual;
-		end if;
-	END;
-/
-
-
-/*
- * Table structure for table 'realmgroup'
- */
-CREATE TABLE realmgroup (
-	id 		INT PRIMARY KEY,
-	RealmName	VARCHAR(30) UNIQUE NOT NULL,
-	GroupName	VARCHAR(30)
-);
-CREATE SEQUENCE realmgroup_seq START WITH 1 INCREMENT BY 1;
-
-CREATE TABLE realms (
-	id		INT PRIMARY KEY,
-	realmname	VARCHAR(64),
-	nas		VARCHAR(128),
-	authport	INT,
-	options		VARCHAR(128)
-);
-CREATE SEQUENCE realms_seq START WITH 1 INCREMENT BY 1;
-
-CREATE TABLE radhuntgroup (
-	id              INT PRIMARY KEY,
-	GroupName VARCHAR(64) NOT NULL,
-	Nasipaddress VARCHAR(15) UNIQUE NOT NULL,
-	NASPortID VARCHAR(15)
-);
-
-CREATE SEQUENCE radhuntgroup_seq START WITH 1 INCREMENT BY 1;
-
-CREATE OR REPLACE TRIGGER radhuntgroup_serialnumber
-	BEFORE INSERT OR UPDATE OF id ON radhuntgroup
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radhuntgroup_seq.nextval into :new.id from dual;
-		end if;
-	END;
-
-CREATE TABLE radpostauth (
-	  id            INT PRIMARY KEY,
-	  UserName      VARCHAR(64) NOT NULL,
-	  Pass          VARCHAR(64),
-	  Reply         VARCHAR(64),
-	  AuthDate 	DATE
-);
-
-CREATE SEQUENCE radpostauth_seq START WITH 1 INCREMENT BY 1;
-
-CREATE OR REPLACE TRIGGER radpostauth_TRIG
-	BEFORE INSERT OR UPDATE OF id ON radpostauth
-	FOR EACH ROW
-	BEGIN
-		if ( :new.id = 0 or :new.id is null ) then
-			SELECT radpostauth_seq.nextval into :new.id from dual;
-		end if;
-		if (:new.AuthDate is null) then
-		  select sysdate into :new.AuthDate from dual;
-		end if;
-	END;
-
-/
-
-/*
- * Table structure for table 'nas'
- */
-CREATE TABLE nas (
-	id              INT PRIMARY KEY,
-	nasname         VARCHAR(128),
-	shortname       VARCHAR(32),
-	type            VARCHAR(30),
-	ports           INT,
-	secret          VARCHAR(60),
-	server          VARCHAR(64),
-	community       VARCHAR(50),
-	description     VARCHAR(200)
-);
-CREATE SEQUENCE nas_seq START WITH 1 INCREMENT BY 1;
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql
deleted file mode 100644
index 09bcaa7..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql
+++ /dev/null
@@ -1,311 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/*
- * $Id: ec9731648e83c1e1d4ee39ed24a994ee79bb7dd6 $
- *
- * --- Peter Nixon [ codemonkey@peternixon.net ]
- *
- * This is a custom SQL schema for doing H323 and SIP VoIP accounting
- * with FreeRadius and Cisco equipment. It is currently known to work
- * with 3640, 5300 and 5350 series as well as CSPS (Cisco SIP Proxy
- * Server).  It will scale A LOT better than the default radius schema
- * which is designed for simple dialup installations of FreeRadius.
- *
- * For this schema to work properly you MUST use
- * raddb/sql/postgresql/voip-postpaid.conf rather than
- * raddb/sql/postgresql/dialup.conf
- *
- * If you wish to do RADIUS Authentication using the same database,
- * you MUST use use raddb/sql/postgresql/schema.sql as well as this schema.
- */
-
-/*
- * Table structure for 'Start' tables
- */
-
-CREATE TABLE StartVoIP (
-	RadAcctId		BIGSERIAL PRIMARY KEY,
-	AcctTime		TIMESTAMP with time zone NOT NULL,
-	h323SetupTime		TIMESTAMP with time zone,
-	H323ConnectTime		TIMESTAMP with time zone,
-	UserName		VARCHAR(64),
-	RadiusServerName	VARCHAR(32),
-	NASIPAddress		INET NOT NULL,
-	CalledStationId		VARCHAR(80),
-	CallingStationId	VARCHAR(80),
-	AcctDelayTime		INTEGER,
-	H323GWID		VARCHAR(32),
-	h323CallOrigin		VARCHAR(10),
-	CallID			VARCHAR(80) NOT NULL,
-	processed		BOOLEAN DEFAULT false
-);
-create index startvoipcombo on startvoip (AcctTime, nasipaddress);
-
-
-CREATE TABLE StartTelephony (
-	RadAcctId		BIGSERIAL PRIMARY KEY,
-	AcctTime		TIMESTAMP with time zone NOT NULL,
-	h323SetupTime		TIMESTAMP with time zone,
-	H323ConnectTime		TIMESTAMP with time zone,
-	UserName		VARCHAR(64),
-	RadiusServerName	VARCHAR(32),
-	NASIPAddress		INET NOT NULL,
-	CalledStationId		VARCHAR(80),
-	CallingStationId	VARCHAR(80),
-	AcctDelayTime		INTEGER,
-	H323GWID		VARCHAR(32),
-	h323CallOrigin		VARCHAR(10),
-	CallID			VARCHAR(80) NOT NULL,
-	processed		BOOLEAN DEFAULT false
-);
-create index starttelephonycombo on starttelephony (AcctTime, nasipaddress);
-
-
-
-/*
- * Table structure for 'Stop' tables
- */
-CREATE TABLE StopVoIP (
-	RadAcctId		BIGSERIAL PRIMARY KEY,
-	AcctTime		TIMESTAMP with time zone NOT NULL,
-	H323SetupTime		TIMESTAMP with time zone,
-	H323ConnectTime		TIMESTAMP with time zone,
-	H323DisconnectTime	TIMESTAMP with time zone,
-	UserName		VARCHAR(32),
-	RadiusServerName	VARCHAR(32),
-	NASIPAddress		INET NOT NULL,
-	AcctSessionTime		BIGINT,
-	AcctInputOctets		BIGINT,
-	AcctOutputOctets	BIGINT,
-	CalledStationId		VARCHAR(80),
-	CallingStationId	VARCHAR(80),
-	AcctDelayTime		SMALLINT,
-	CiscoNASPort		VARCHAR(1),
-	H323GWID		VARCHAR(32),
-	H323CallOrigin		VARCHAR(10),
-	H323DisconnectCause	VARCHAR(20),
-	H323RemoteAddress	INET,
-	H323VoiceQuality	INTEGER,
-	CallID			VARCHAR(80) NOT NULL,
-	processed		BOOLEAN DEFAULT false
-);
-create UNIQUE index stopvoipcombo on stopvoip (AcctTime, nasipaddress, CallID);
-
-
-CREATE TABLE StopTelephony (
-	RadAcctId		BIGSERIAL PRIMARY KEY,
-	AcctTime		TIMESTAMP with time zone NOT NULL,
-	H323SetupTime		TIMESTAMP with time zone NOT NULL,
-	H323ConnectTime		TIMESTAMP with time zone NOT NULL,
-	H323DisconnectTime	TIMESTAMP with time zone NOT NULL,
-	UserName		VARCHAR(32) DEFAULT '' NOT NULL,
-	RadiusServerName	VARCHAR(32),
-	NASIPAddress		INET NOT NULL,
-	AcctSessionTime		BIGINT,
-	AcctInputOctets		BIGINT,
-	AcctOutputOctets	BIGINT,
-	CalledStationId		VARCHAR(80),
-	CallingStationId	VARCHAR(80),
-	AcctDelayTime		SMALLINT,
-	CiscoNASPort		VARCHAR(16),
-	H323GWID		VARCHAR(32),
-	H323CallOrigin		VARCHAR(10),
-	H323DisconnectCause	VARCHAR(20),
-	H323RemoteAddress	INET,
-	H323VoiceQuality	INTEGER,
-	CallID			VARCHAR(80) NOT NULL,
-	processed		BOOLEAN DEFAULT false
-);
--- You can have more than one record that is identical except for CiscoNASPort if you have a dial peer hungroup
--- configured for multiple PRIs.
-create UNIQUE index stoptelephonycombo on stoptelephony (AcctTime, nasipaddress, CallID, CiscoNASPort);
-
-/*
- * Table structure for 'gateways'
- *
- * This table should list the IP addresses, names and locations of all your gateways
- * This can be used to make more useful reports.
- *
- * Note: This table should be removed in favour of using the "nas" table.
- */
-
-CREATE TABLE gateways (
-	gw_ip		INET NOT NULL,
-	gw_name		VARCHAR(32) NOT NULL,
-	gw_city		VARCHAR(32)
-);
-
-
-/*
- * Table structure for 'customers'
- *
- * This table should list your Customers names and company
- * This can be used to make more useful reports.
- */
-
-CREATE TABLE customers (
-	cust_id		SERIAL NOT NULL,
-	company		VARCHAR(32),
-	customer	VARCHAR(32)
-);
-
-/*
- * Table structure for 'cust_gw'
- *
- * This table should list the IP addresses and Customer IDs of all your Customers gateways
- * This can be used to make more useful reports.
- */
-
-CREATE TABLE cust_gw (
-	cust_gw		INET PRIMARY KEY,
-	cust_id		INTEGER NOT NULL,
-	"location"	VARCHAR(32)
-);
-
-
-CREATE VIEW customerip AS
-    SELECT gw.cust_gw AS ipaddr, cust.company, cust.customer, gw."location" FROM customers cust, cust_gw gw WHERE (cust.cust_id = gw.cust_id);
-
-
--- create plpgsql language (You need to be a database superuser to be able to do this)
-CREATE FUNCTION "plpgsql_call_handler" () RETURNS LANGUAGE_HANDLER AS '$libdir/plpgsql' LANGUAGE C;
-CREATE TRUSTED LANGUAGE "plpgsql" HANDLER "plpgsql_call_handler";
-
-/*
- * Function 'strip_dot'
- * removes "." from the start of cisco timestamps
- *
- * From the cisco website:
- * "A timestamp that is preceded by an asterisk (*) or a dot (.) may not be accurate.
- *  An asterisk (*) means that after a gateway reboot, the gateway clock was not manually set
- *  and the gateway has not synchronized with an NTP server yet. A dot (.) means the gateway
- *  NTP has lost synchronization with an NTP server."
- *
- * We therefore do not bother to strip asterisks (*) from timestamps, as you NEED ntp setup
- * unless you don't care about billing at all!
- *
- *  * Example useage:
- *      insert into mytable values (strip_dot('.16:46:02.356 EET Wed Dec 11 2002'));
- *
- */
-
-
-CREATE OR REPLACE FUNCTION strip_dot (VARCHAR) RETURNS TIMESTAMPTZ AS '
- DECLARE
-	original_timestamp ALIAS FOR $1;
- BEGIN
-	IF original_timestamp = '''' THEN
-		RETURN NULL;
-	END IF;
-	IF substring(original_timestamp from 1 for 1) = ''.'' THEN
-		RETURN substring(original_timestamp from 2);
-	ELSE
-		RETURN original_timestamp;
-	END IF;
- END;
-' LANGUAGE 'plpgsql';
-
-
-CREATE OR REPLACE FUNCTION pick_id (VARCHAR, VARCHAR) RETURNS VARCHAR AS '
- DECLARE
-	h323confid ALIAS FOR $1;
-	callid ALIAS FOR $2;
- BEGIN
-	IF h323confid <> '''' THEN
-		RETURN h323confid;
-	END IF;
-	IF callid <> '''' THEN
-		RETURN callid;
-	END IF;
-	RETURN NULL;
- END;
-' LANGUAGE 'plpgsql';
-
-
-
-/*
- * Table structure for 'isdn_error_codes' table
- *
- * Taken from cisco.com this data can be JOINED against h323DisconnectCause to
- * give human readable error reports.
- *
- */
-
-
-CREATE TABLE isdn_error_codes (
-	error_code	VARCHAR(2) PRIMARY KEY,
-	desc_short	VARCHAR(90),
-	desc_long	TEXT
-);
-
-/*
- * Data for 'isdn_error_codes' table
- */
-
-INSERT INTO isdn_error_codes VALUES ('1', 'Unallocated (unassigned) number', 'The ISDN number was sent to the switch in the correct format; however, the number is not assigned to any destination equipment.');
-INSERT INTO isdn_error_codes VALUES ('10', 'Normal call clearing', 'Normal call clearing has occurred.');
-INSERT INTO isdn_error_codes VALUES ('11', 'User busy', 'The called system acknowledges the connection request but is unable to accept the call because all B channels are in use.');
-INSERT INTO isdn_error_codes VALUES ('12', 'No user responding', 'The connection cannot be completed because the destination does not respond to the call.');
-INSERT INTO isdn_error_codes VALUES ('13', 'No answer from user (user alerted)', 'The destination responds to the connection request but fails to complete the connection within the prescribed time. The problem is at the remote end of the connection.');
-INSERT INTO isdn_error_codes VALUES ('15', 'Call rejected', 'The destination is capable of accepting the call but rejected the call for an unknown reason.');
-INSERT INTO isdn_error_codes VALUES ('16', 'Number changed', 'The ISDN number used to set up the call is not assigned to any system.');
-INSERT INTO isdn_error_codes VALUES ('1A', 'Non-selected user clearing', 'The destination is capable of accepting the call but rejected the call because it was not assigned to the user.');
-INSERT INTO isdn_error_codes VALUES ('1B', 'Designation out of order', 'The destination cannot be reached because the interface is not functioning correctly, and a signaling message cannot be delivered. This might be a temporary condition, but it could last for an extended period of time. For example, the remote equipment might be turned off.');
-INSERT INTO isdn_error_codes VALUES ('1C', 'Invalid number format', 'The connection could be established because the destination address was presented in an unrecognizable format or because the destination address was incomplete.');
-INSERT INTO isdn_error_codes VALUES ('1D', 'Facility rejected', 'The facility requested by the user cannot be provided by the network.');
-INSERT INTO isdn_error_codes VALUES ('1E', 'Response to STATUS ENQUIRY', 'The status message was generated in direct response to the prior receipt of a status enquiry message.');
-INSERT INTO isdn_error_codes VALUES ('1F', 'Normal, unspecified', 'Reports the occurrence of a normal event when no standard cause applies. No action required.');
-INSERT INTO isdn_error_codes VALUES ('2', 'No route to specified transit network', 'The ISDN exchange is asked to route the call through an unrecognized intermediate network.');
-INSERT INTO isdn_error_codes VALUES ('22', 'No circuit/channel available', 'The connection cannot be established because no appropriate channel is available to take the call.');
-INSERT INTO isdn_error_codes VALUES ('26', 'Network out of order', 'The destination cannot be reached because the network is not functioning correctly, and the condition might last for an extended period of time. An immediate reconnect attempt will probably be unsuccessful.');
-INSERT INTO isdn_error_codes VALUES ('29', 'Temporary failure', 'An error occurred because the network is not functioning correctly. The problem will be resolved shortly.');
-INSERT INTO isdn_error_codes VALUES ('2A', 'Switching equipment congestion', 'The destination cannot be reached because the network switching equipment is temporarily overloaded.');
-INSERT INTO isdn_error_codes VALUES ('2B', 'Access information discarded', 'The network cannot provide the requested access information.');
-INSERT INTO isdn_error_codes VALUES ('2C', 'Requested circuit/channel not available', 'The remote equipment cannot provide the requested channel for an unknown reason. This might be a temporary problem.');
-INSERT INTO isdn_error_codes VALUES ('2F', 'Resources unavailable, unspecified', 'The requested channel or service is unavailable for an unknown reason. This might be a temporary problem.');
-INSERT INTO isdn_error_codes VALUES ('3', 'No route to destination', 'The call was routed through an intermediate network that does not serve the destination address.');
-INSERT INTO isdn_error_codes VALUES ('31', 'Quality of service unavailable', 'The requested quality of service cannot be provided by the network. This might be a subscription problem.');
-INSERT INTO isdn_error_codes VALUES ('32', 'Requested facility not subscribed', 'The remote equipment supports the requested supplementary service by subscription only.');
-INSERT INTO isdn_error_codes VALUES ('39', 'Bearer capability not authorized', 'The user requested a bearer capability that the network provides, but the user is not authorized to use it. This might be a subscription problem.');
-INSERT INTO isdn_error_codes VALUES ('3A', 'Bearer capability not presently available', 'The network normally provides the requested bearer capability, but it is unavailable at the present time. This might be due to a temporary network problem or to a subscription problem.');
-INSERT INTO isdn_error_codes VALUES ('3F', 'Service or option not available, unspecified', 'The network or remote equipment was unable to provide the requested service option for an unspecified reason. This might be a subscription problem.');
-INSERT INTO isdn_error_codes VALUES ('41', 'Bearer capability not implemented', 'The network cannot provide the bearer capability requested by the user.');
-INSERT INTO isdn_error_codes VALUES ('42', 'Channel type not implemented', 'The network or the destination equipment does not support the requested channel type.');
-INSERT INTO isdn_error_codes VALUES ('45', 'Requested facility not implemented', 'The remote equipment does not support the requested supplementary service.');
-INSERT INTO isdn_error_codes VALUES ('46', 'Only restricted digital information bearer capability is available', 'The network is unable to provide unrestricted digital information bearer capability.');
-INSERT INTO isdn_error_codes VALUES ('4F', 'Service or option not implemented, unspecified', 'The network or remote equipment is unable to provide the requested service option for an unspecified reason. This might be a subscription problem.');
-INSERT INTO isdn_error_codes VALUES ('51', 'Invalid call reference value', 'The remote equipment received a call with a call reference that is not currently in use on the user-network interface.');
-INSERT INTO isdn_error_codes VALUES ('52', 'Identified channel does not exist', 'The receiving equipment is requested to use a channel that is not activated on the interface for calls.');
-INSERT INTO isdn_error_codes VALUES ('53', 'A suspended call exists, but this call identity does not', 'The network received a call resume request. The call resume request contained a Call Identify information element that indicates that the call identity is being used for a suspended call.');
-INSERT INTO isdn_error_codes VALUES ('54', 'Call identity in use', 'The network received a call resume request. The call resume request contained a Call Identify information element that indicates that it is in use for a suspended call.');
-INSERT INTO isdn_error_codes VALUES ('55', 'No call suspended', 'The network received a call resume request when there was not a suspended call pending. This might be a transient error that will be resolved by successive call retries.');
-INSERT INTO isdn_error_codes VALUES ('56', 'Call having the requested call identity has been cleared', 'The network received a call resume request. The call resume request contained a Call Identity information element, which once indicated a suspended call. However, the suspended call was cleared either by timeout or by the remote user.');
-INSERT INTO isdn_error_codes VALUES ('58', 'Incompatible destination', 'Indicates that an attempt was made to connect to non-ISDN equipment. For example, to an analog line.');
-INSERT INTO isdn_error_codes VALUES ('5B', 'Invalid transit network selection', 'The ISDN exchange was asked to route the call through an unrecognized intermediate network.');
-INSERT INTO isdn_error_codes VALUES ('5F', 'Invalid message, unspecified', 'An invalid message was received, and no standard cause applies. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
-INSERT INTO isdn_error_codes VALUES ('6', 'Channel unacceptable', 'The service quality of the specified channel is insufficient to accept the connection.');
-INSERT INTO isdn_error_codes VALUES ('60', 'Mandatory information element is missing', 'The receiving equipment received a message that did not include one of the mandatory information elements. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
-INSERT INTO isdn_error_codes VALUES ('61', 'Message type non-existent or not implemented', 'The receiving equipment received an unrecognized message, either because the message type was invalid or because the message type was valid but not supported. The cause is due to either a problem with the remote configuration or a problem with the local D channel.');
-INSERT INTO isdn_error_codes VALUES ('62', 'Message not compatible with call state or message type non-existent or not implemented', 'The remote equipment received an invalid message, and no standard cause applies. This cause is due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
-INSERT INTO isdn_error_codes VALUES ('63', 'Information element non-existent or not implemented', 'The remote equipment received a message that includes information elements, which were not recognized. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
-INSERT INTO isdn_error_codes VALUES ('64', 'Invalid information element contents', 'The remote equipment received a message that includes invalid information in the information element. This is usually due to a D-channel error.');
-INSERT INTO isdn_error_codes VALUES ('65', 'Message not compatible with call state', 'The remote equipment received an unexpected message that does not correspond to the current state of the connection. This is usually due to a D-channel error.');
-INSERT INTO isdn_error_codes VALUES ('66', 'Recovery on timer expires', 'An error-handling (recovery) procedure was initiated by a timer expiry. This is usually a temporary problem.');
-INSERT INTO isdn_error_codes VALUES ('6F', 'Protocol error, unspecified', 'An unspecified D-channel error when no other standard cause applies.');
-INSERT INTO isdn_error_codes VALUES ('7', 'Call awarded and being delivered in an established channel', 'The user is assigned an incoming call that is being connected to an already-established call channel.');
-INSERT INTO isdn_error_codes VALUES ('7F', 'Internetworking, unspecified', 'An event occurred, but the network does not provide causes for the action that it takes. The precise problem is unknown.');
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql
deleted file mode 100644
index 9630b7b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/*
- * $Id: 37f42a0b13515b09f9c7792e8a64b2a3b187e7a3 $
- *
- * OPTIONAL Postgresql trigger for FreeRADIUS
- *
- * This trigger updates fills in the groupname field (which doesnt come in Accounting packets)
- * by querying the radusergroup table.
- * This makes it easier to do group summary reports, however note that it does add some extra
- * database load to 50% of your SQL accounting queries. If you dont care about group summary
- * reports then you dont need to install this.
- *
- */
-
-
-CREATE OR REPLACE FUNCTION upd_radgroups() RETURNS trigger AS'
-
-DECLARE
-	v_groupname varchar;
-
-BEGIN
-	SELECT INTO v_groupname GroupName FROM radusergroup WHERE CalledStationId = NEW.CalledStationId AND UserName = NEW.UserName;
-	IF FOUND THEN
-		UPDATE radacct SET GroupName = v_groupname WHERE RadAcctId = NEW.RadAcctId;
-	END IF;
-
-	RETURN NEW;
-END
-
-'LANGUAGE plpgsql;
-
-
-DROP TRIGGER upd_radgroups ON radacct;
-
-CREATE TRIGGER upd_radgroups AFTER INSERT ON radacct
-    FOR EACH ROW EXECUTE PROCEDURE upd_radgroups();
-
-
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf
deleted file mode 100644
index 6ae361d..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf
+++ /dev/null
@@ -1,70 +0,0 @@
-# -*- text -*-
-##
-## voip-postpaid.conf -- PostgreSQL configuration for H323 VoIP billingx
-##			 (cisco_h323_db_schema.sql)
-##
-##	$Id: 9f1449cc37d80e37025bdfd08fbd4d028aa0c800 $
-
-
-	#######################################################################
-	#  Query config:  Username
-	#######################################################################
-	# This is the username that will get substituted, escaped, and added
-	# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
-	# everywhere a username substitution is needed so you you can be sure
-	# the username passed from the client is escaped properly.
-	#
-	#  Uncomment the next line, if you want the sql_user_name to mean:
-	#
-	#    Use Stripped-User-Name, if it's there.
-	#    Else use User-Name, if it's there,
-	#    Else use hard-coded string "none" as the user name.
-	#
-	#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
-	#
-	sql_user_name = "%{User-Name}"
-
-	accounting {
-		reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-
-		# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-		# when used with the rlm_sql_null driver.
-#		logfile = ${logdir}/accounting.sql
-
-		type {
-			start {
-				query = "INSERT INTO ${....acct_table1}%{h323-call-type} \
-						(RadiusServerName, UserName, NASIPAddress, AcctTime, CalledStationId, \
-						 CallingStationId, AcctDelayTime, h323gwid, h323callorigin, \
-						 h323setuptime, H323ConnectTime, callid) \
-					VALUES(\
-						'${radius_server_name}', '%{SQL-User-Name}', \
-						'%{NAS-IP-Address}', now(), '%{Called-Station-Id}', \
-						'%{Calling-Station-Id}', '%{%{Acct-Delay-Time}:-0}', '%{h323-gw-id}', \
-						'%{h323-call-origin}', strip_dot('%{h323-setup-time}'), \
-						strip_dot('%{h323-connect-time}'), pick_id('%{h323-conf-id}', \
-						'%{call-id}'))"
-			}
-
-			stop {
-				query = "INSERT INTO $....acct_table2}%{h323-call-type} \
-						(RadiusServerName, UserName, NASIPAddress, AcctTime, \
-						 AcctSessionTime, AcctInputOctets, AcctOutputOctets, CalledStationId, \
-						 CallingStationId, AcctDelayTime, H323RemoteAddress, H323VoiceQuality, \
-						 CiscoNASPort, h323callorigin, callid, h323connecttime, \
-						 h323disconnectcause, h323disconnecttime, h323gwid, h323setuptime) \
-					VALUES(\
-						'${radius_server_name}', '%{SQL-User-Name}', '%{NAS-IP-Address}', \
-						NOW(),  '%{%{Acct-Session-Time}:-0}', \
-						'%{%{Acct-Input-Octets}:-0}', '%{%{Acct-Output-Octets}:-0}', \
-						'%{Called-Station-Id}', '%{Calling-Station-Id}', \
-						'%{%{Acct-Delay-Time}:-0}', NULLIF('%{h323-remote-address}', '')::inet, \
-						NULLIF('%{h323-voice-quality}','')::integer, \
-						NULLIF('%{Cisco-NAS-Port}', ''), \
-						'%{h323-call-origin}', pick_id('%{h323-conf-id}', '%{call-id}'), \
-						strip_dot('%{h323-connect-time}'), '%{h323-disconnect-cause}', \
-						strip_dot('%{h323-disconnect-time}'), '%{h323-gw-id}', \
-						strip_dot('%{h323-setup-time}'))"
-			}
-		}
-	}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf
deleted file mode 100644
index d5b61cf..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf
+++ /dev/null
@@ -1,448 +0,0 @@
-# -*- text -*-
-#
-#  main/postgresql/queries.conf -- PostgreSQL configuration for default schema (schema.sql)
-#
-#  $Id: 0f2a29afff36136bb171a9a97ee90199b017e46c $
-
-# Safe characters list for sql queries. Everything else is replaced
-# with their mime-encoded equivalents.
-# The default list should be ok
-# safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used
-# below everywhere a username substitution is needed so you you can
-# be sure the username passed from the client is escaped properly.
-#
-# Uncomment the next line, if you want the sql_user_name to mean:
-#
-#    Use Stripped-User-Name, if it's there.
-#    Else use User-Name, if it's there,
-#    Else use hard-coded string "none" as the user name.
-#
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
-
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-#  Default profile
-#######################################################################
-# This is the default profile. It is found in SQL by group membership.
-# That means that this profile must be a member of at least one group
-# which will contain the corresponding check and reply items.
-# This profile will be queried in the authorize section for every user.
-# The point is to assign all users a default profile without having to
-# manually add each one to a group that will contain the profile.
-# The SQL module will also honor the User-Profile attribute. This
-# attribute can be set anywhere in the authorize section (ie the users
-# file). It is found exactly as the default profile is found.
-# If it is set then it will *overwrite* the default profile setting.
-# The idea is to select profiles based on checks on the incoming
-# packets, not on user group membership. For example:
-# -- users file --
-# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
-# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
-#
-# By default the default_user_profile is not set
-#
-# default_user_profile = "DEFAULT"
-
-#######################################################################
-#  Open Query
-#######################################################################
-# This query is run whenever a new connection is opened.
-# It is commented out by default.
-#
-# If you have issues with connections hanging for too long, uncomment
-# the next line, and set the timeout in milliseconds.  As a general
-# rule, if the queries take longer than a second, something is wrong
-# with the database.
-#open_query = "set statement_timeout to 1000"
-
-#######################################################################
-#  NAS Query
-#######################################################################
-#  This query retrieves the radius clients
-#
-#  0. Row ID (currently unused)
-#  1. Name (or IP address)
-#  2. Shortname
-#  3. Type
-#  4. Secret
-#  5. Server
-#######################################################################
-
-client_query = "\
-	SELECT id, nasname, shortname, type, secret, server \
-	FROM ${client_table}"
-
-#######################################################################
-#  Authorization Queries
-#######################################################################
-#  These queries compare the check items for the user
-#  in ${authcheck_table} and setup the reply items in
-#  ${authreply_table}.  You can use any query/tables
-#  you want, but the return data for each row MUST
-#  be in the  following order:
-#
-#  0. Row ID (currently unused)
-#  1. UserName/GroupName
-#  2. Item Attr Name
-#  3. Item Attr Value
-#  4. Item Attr Operation
-#######################################################################
-
-#
-#  Use these for case insensitive usernames. WARNING: Slower queries!
-#
-#authorize_check_query = "\
-#	SELECT id, UserName, Attribute, Value, Op \
-#	FROM ${authcheck_table} \
-#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
-#	ORDER BY id"
-
-#authorize_reply_query = "\
-#	SELECT id, UserName, Attribute, Value, Op \
-#	FROM ${authreply_table} \
-#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
-#	ORDER BY id"
-
-authorize_check_query = "\
-	SELECT id, UserName, Attribute, Value, Op \
-	FROM ${authcheck_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_reply_query = "\
-	SELECT id, UserName, Attribute, Value, Op \
-	FROM ${authreply_table} \
-	WHERE Username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-#
-#  Use these for case insensitive usernames. WARNING: Slower queries!
-#
-#authorize_group_check_query = "\
-#	SELECT \
-#		${groupcheck_table}.id, ${groupcheck_table}.GroupName, ${groupcheck_table}.Attribute, \
-#		${groupcheck_table}.Value, ${groupcheck_table}.Op \
-#	FROM ${groupcheck_table}, ${usergroup_table} \
-#	WHERE LOWER(${usergroup_table}.UserName) = LOWER('%{SQL-User-Name}') \
-#	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
-#	ORDER BY ${groupcheck_table}.id"
-
-#authorize_group_reply_query = "\
-#	SELECT \
-#		${groupreply_table}.id, ${groupreply_table}.GroupName, \
-#		${groupreply_table}.Attribute, ${groupreply_table}.Value, ${groupreply_table}.Op \
-#	FROM ${groupreply_table}, ${usergroup_table} \
-#	WHERE LOWER(${usergroup_table}.UserName) = LOWER('%{SQL-User-Name}') \
-#	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
-#	ORDER BY ${groupreply_table}.id"
-
-authorize_group_check_query = "\
-	SELECT id, GroupName, Attribute, Value, op \
-	FROM ${groupcheck_table} \
-	WHERE GroupName = '%{Sql-Group}' \
-	ORDER BY id"
-
-authorize_group_reply_query = "\
-	SELECT id, GroupName, Attribute, Value, op \
-	FROM ${groupreply_table} \
-	WHERE GroupName = '%{Sql-Group}' \
-	ORDER BY id"
-
-#######################################################################
-# Simultaneous Use Checking Queries
-#######################################################################
-# simul_count_query     - query for the number of current connections
-#                       - If this is not defined, no simultaneous use checking
-#                       - will be performed by this module instance
-# simul_verify_query    - query to return details of current connections for verification
-#                       - Leave blank or commented out to disable verification step
-#                       - Note that the returned field order should not be changed.
-#######################################################################
-
-#
-#  Uncomment simul_count_query to enable simultaneous use checking
-#
-#simul_count_query = "\
-#	SELECT COUNT(*) \
-#	FROM ${acct_table1} \
-#	WHERE UserName='%{SQL-User-Name}' \
-#	AND AcctStopTime IS NULL"
-
-#simul_verify_query = "\
-#	SELECT RadAcctId, AcctSessionId, UserName, NASIPAddress, NASPortId, FramedIPAddress, CallingStationId, \
-#		FramedProtocol \
-#	FROM ${acct_table1} \
-#	WHERE UserName='%{SQL-User-Name}' \
-#	AND AcctStopTime IS NULL"
-
-#######################################################################
-# Group Membership Queries
-#######################################################################
-# group_membership_query        - Check user group membership
-#######################################################################
-
-# Use these for case insensitive usernames. WARNING: Slower queries!
-#group_membership_query = "\
-#	SELECT GroupName \
-#	FROM ${usergroup_table} \
-#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
-#	ORDER BY priority"
-
-group_membership_query = "\
-	SELECT GroupName \
-	FROM ${usergroup_table} \
-	WHERE UserName='%{SQL-User-Name}' \
-	ORDER BY priority"
-
-#######################################################################
-# Accounting and Post-Auth Queries
-#######################################################################
-# These queries insert/update accounting and authentication records.
-# The query to use is determined by the value of 'reference'.
-# This value is used as a configuration path and should resolve to one
-# or more 'query's. If reference points to multiple queries, and a query
-# fails, the next query is executed.
-#
-# Behaviour is identical to the old 1.x/2.x module, except we can now
-# fail between N queries, and query selection can be based on any
-# combination of attributes, or custom 'Acct-Status-Type' values.
-#######################################################################
-accounting {
-	reference = "%{tolower:type.%{%{Acct-Status-Type}:-none}.query}"
-
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/accounting.sql
-
-	column_list = "\
-		AcctSessionId,		AcctUniqueId,		UserName, \
-		Realm,			NASIPAddress,		NASPortId, \
-		NASPortType,		AcctStartTime,		AcctUpdateTime, \
-		AcctStopTime,		AcctSessionTime, 	AcctAuthentic, \
-		ConnectInfo_start,	ConnectInfo_Stop, 	AcctInputOctets, \
-		AcctOutputOctets,	CalledStationId, 	CallingStationId, \
-		AcctTerminateCause,	ServiceType,		FramedProtocol, \
-		FramedIpAddress"
-
-	type {
-		accounting-on {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctSessionTime = (%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime))), \
-					AcctTerminateCause = '%{%{Acct-Terminate-Cause}:-NAS-Reboot}', \
-				WHERE AcctStopTime IS NULL \
-				AND NASIPAddress= '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
-				AND AcctStartTime <= '%S'::timestamp"
-		}
-
-		accounting-off {
-			query = "${..accounting-on.query}"
-		}
-
-		start {
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					NULLIF('%{Realm}', ''), \
-					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
-					%{%{NAS-Port}:-NULL}, \
-					'%{NAS-Port-Type}', \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					NULL, \
-					0, \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					NULL, \
-					0, \
-					0, \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					NULL, \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					NULLIF('%{Framed-IP-Address}', '')::inet)"
-
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStartTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					ConnectInfo_start = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
-				AND AcctStopTime IS NULL"
-
-			# and again where we don't have "AND AcctStopTime IS NULL"
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					AcctStartTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					ConnectInfo_start = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}'"
-		}
-
-		interim-update {
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
-					AcctSessionTime = %{%{Acct-Session-Time}:-NULL}, \
-					AcctInterval = (%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM (COALESCE(AcctUpdateTime, AcctStartTime)))), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Input-Octets}:-0}'::bigint), \
-					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Output-Octets}:-0}'::bigint) \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress= '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
-				AND AcctStopTime IS NULL"
-
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					NULLIF('%{Realm}', ''), \
-					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
-					%{%{NAS-Port}:-NULL}, \
-					'%{NAS-Port-Type}', \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					NULL, \
-					%{%{Acct-Session-Time}:-NULL}, \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					NULL, \
-					(('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Input-Octets}:-0}'::bigint), \
-					(('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Output-Octets}:-0}'::bigint), \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					NULL, \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					NULLIF('%{Framed-IP-Address}', '')::inet)"
-		}
-
-		stop {
-			query = "\
-				UPDATE ${....acct_table2} \
-				SET \
-					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctSessionTime = COALESCE(%{%{Acct-Session-Time}:-NULL}, \
-						(%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime)))), \
-					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Input-Octets}:-0}'::bigint), \
-					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Output-Octets}:-0}'::bigint), \
-					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
-					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
-					ConnectInfo_stop = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
-				AND AcctStopTime IS NULL"
-
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES(\
-					'%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					NULLIF('%{Realm}', ''), \
-					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
-					%{%{NAS-Port}:-NULL}, \
-					'%{NAS-Port-Type}', \
-					TO_TIMESTAMP(%{integer:Event-Timestamp} - %{%{Acct-Session-Time}:-0}), \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					NULLIF('%{Acct-Session-Time}', '')::bigint, \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					NULL, \
-					(('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Input-Octets}:-0}'::bigint), \
-					(('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Output-Octets}:-0}'::bigint), \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Acct-Terminate-Cause}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					NULLIF('%{Framed-IP-Address}', '')::inet)"
-
-			# and again where we don't have "AND AcctStopTime IS NULL"
-			query = "\
-				UPDATE ${....acct_table2} \
-				SET \
-					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
-					AcctSessionTime = COALESCE(%{%{Acct-Session-Time}:-NULL}, \
-						(%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime)))), \
-					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Input-Octets}:-0}'::bigint), \
-					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
-						'%{%{Acct-Output-Octets}:-0}'::bigint), \
-					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
-					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
-					ConnectInfo_stop = '%{Connect-Info}' \
-				WHERE AcctSessionId = '%{Acct-Session-Id}' \
-				AND UserName = '%{SQL-User-Name}' \
-				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}'"
-		}
-
-		#
-		#  No Acct-Status-Type == ignore the packet
-		#
-		none {
-		     query = "SELECT true"
-		}
-	}
-}
-
-
-#######################################################################
-# Authentication Logging Queries
-#######################################################################
-# postauth_query                - Insert some info after authentication
-#######################################################################
-
-post-auth {
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/post-auth.sql
-
-	query = "\
-		INSERT INTO ${..postauth_table} \
-			(username, pass, reply, authdate) \
-		VALUES(\
-			'%{User-Name}', \
-			'%{%{User-Password}:-Chap-Password}', \
-			'%{reply:Packet-Type}', \
-			NOW())"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql
deleted file mode 100644
index 6c74f5c..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql
+++ /dev/null
@@ -1,194 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/*
- * $Id: 5ab9d29306cbef460fe310aafd5d2046267cbbdc $
- *
- * Postgresql schema for FreeRADIUS
- *
- * All field lengths need checking as some are still suboptimal. -pnixon 2003-07-13
- *
- */
-
-/*
- * Table structure for table 'radacct'
- *
- * Note: Column type bigserial does not exist prior to Postgres 7.2
- *       If you run an older version you need to change this to serial
- */
-CREATE TABLE radacct (
-	RadAcctId		bigserial PRIMARY KEY,
-	AcctSessionId		text NOT NULL,
-	AcctUniqueId		text NOT NULL UNIQUE,
-	UserName		text,
-	GroupName		text,
-	Realm			text,
-	NASIPAddress		inet NOT NULL,
-	NASPortId		text,
-	NASPortType		text,
-	AcctStartTime		timestamp with time zone,
-	AcctUpdateTime		timestamp with time zone,
-	AcctStopTime		timestamp with time zone,
-	AcctInterval		bigint,
-	AcctSessionTime		bigint,
-	AcctAuthentic		text,
-	ConnectInfo_start	text,
-	ConnectInfo_stop	text,
-	AcctInputOctets		bigint,
-	AcctOutputOctets	bigint,
-	CalledStationId		text,
-	CallingStationId	text,
-	AcctTerminateCause	text,
-	ServiceType		text,
-	FramedProtocol		text,
-	FramedIPAddress		inet
-);
--- This index may be useful..
--- CREATE UNIQUE INDEX radacct_whoson on radacct (AcctStartTime, nasipaddress);
-
--- For use by update-, stop- and simul_* queries
-CREATE INDEX radacct_active_user_idx ON radacct (AcctSessionId, UserName, NASIPAddress) WHERE AcctStopTime IS NULL;
-
--- For use by onoff-
-create INDEX radacct_bulk_close ON radacct (NASIPAddress, AcctStartTime) WHERE AcctStopTime IS NULL;
-
--- and for common statistic queries:
-CREATE INDEX radacct_start_user_idx ON radacct (AcctStartTime, UserName);
-
--- and, optionally
--- CREATE INDEX radacct_stop_user_idx ON radacct (acctStopTime, UserName);
-
-/*
- * There was WAAAY too many indexes previously. This combo index
- * should take care of the most common searches.
- * I have commented out all the old indexes, but left them in case
- * someone wants them. I don't recomend anywone use them all at once
- * as they will slow down your DB too much.
- *  - pnixon 2003-07-13
- */
-
-/*
- * create index radacct_UserName on radacct (UserName);
- * create index radacct_AcctSessionId on radacct (AcctSessionId);
- * create index radacct_AcctUniqueId on radacct (AcctUniqueId);
- * create index radacct_FramedIPAddress on radacct (FramedIPAddress);
- * create index radacct_NASIPAddress on radacct (NASIPAddress);
- * create index radacct_AcctStartTime on radacct (AcctStartTime);
- * create index radacct_AcctStopTime on radacct (AcctStopTime);
-*/
-
-
-
-/*
- * Table structure for table 'radcheck'
- */
-CREATE TABLE radcheck (
-	id			serial PRIMARY KEY,
-	UserName		text NOT NULL DEFAULT '',
-	Attribute		text NOT NULL DEFAULT '',
-	op			VARCHAR(2) NOT NULL DEFAULT '==',
-	Value			text NOT NULL DEFAULT ''
-);
-create index radcheck_UserName on radcheck (UserName,Attribute);
-/*
- * Use this index if you use case insensitive queries
- */
--- create index radcheck_UserName_lower on radcheck (lower(UserName),Attribute);
-
-/*
- * Table structure for table 'radgroupcheck'
- */
-CREATE TABLE radgroupcheck (
-	id			serial PRIMARY KEY,
-	GroupName		text NOT NULL DEFAULT '',
-	Attribute		text NOT NULL DEFAULT '',
-	op			VARCHAR(2) NOT NULL DEFAULT '==',
-	Value			text NOT NULL DEFAULT ''
-);
-create index radgroupcheck_GroupName on radgroupcheck (GroupName,Attribute);
-
-/*
- * Table structure for table 'radgroupreply'
- */
-CREATE TABLE radgroupreply (
-	id			serial PRIMARY KEY,
-	GroupName		text NOT NULL DEFAULT '',
-	Attribute		text NOT NULL DEFAULT '',
-	op			VARCHAR(2) NOT NULL DEFAULT '=',
-	Value			text NOT NULL DEFAULT ''
-);
-create index radgroupreply_GroupName on radgroupreply (GroupName,Attribute);
-
-/*
- * Table structure for table 'radreply'
- */
-CREATE TABLE radreply (
-	id			serial PRIMARY KEY,
-	UserName		text NOT NULL DEFAULT '',
-	Attribute		text NOT NULL DEFAULT '',
-	op			VARCHAR(2) NOT NULL DEFAULT '=',
-	Value			text NOT NULL DEFAULT ''
-);
-create index radreply_UserName on radreply (UserName,Attribute);
-/*
- * Use this index if you use case insensitive queries
- */
--- create index radreply_UserName_lower on radreply (lower(UserName),Attribute);
-
-/*
- * Table structure for table 'radusergroup'
- */
-CREATE TABLE radusergroup (
-	id			serial PRIMARY KEY,
-	UserName		text NOT NULL DEFAULT '',
-	GroupName		text NOT NULL DEFAULT '',
-	priority		integer NOT NULL DEFAULT 0
-);
-create index radusergroup_UserName on radusergroup (UserName);
-/*
- * Use this index if you use case insensitive queries
- */
--- create index radusergroup_UserName_lower on radusergroup (lower(UserName));
-
---
--- Table structure for table 'radpostauth'
---
-
-CREATE TABLE radpostauth (
-	id			bigserial PRIMARY KEY,
-	username		text NOT NULL,
-	pass			text,
-	reply			text,
-	CalledStationId		text,
-	CallingStationId	text,
-	authdate		timestamp with time zone NOT NULL default now()
-);
-
-/*
- * Table structure for table 'nas'
- */
-CREATE TABLE nas (
-	id			serial PRIMARY KEY,
-	nasname			text NOT NULL,
-	shortname		text NOT NULL,
-	type			text NOT NULL DEFAULT 'other',
-	ports			integer,
-	secret			text NOT NULL,
-	server			text,
-	community		text,
-	description		text
-);
-create index nas_nasname on nas (nasname);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql
deleted file mode 100644
index 50bfe71..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-/*
- * admin.sql -- PostgreSQL commands for creating the RADIUS user.
- *
- *	WARNING: You should change 'localhost' and 'radpass'
- *		 to something else.  Also update raddb/sql.conf
- *		 with the new RADIUS password.
- *
- *	WARNING: This example file is untested.  Use at your own risk.
- *		 Please send any bug fixes to the mailing list.
- *
- *	$Id: 26d08cae41c788321bdf8fd1b0c41a443b2da6f4 $
- */
-
-/*
- *  Create default administrator for RADIUS
- */
-CREATE USER radius WITH PASSWORD 'radpass';
-
-/*
- * The server can read any table in SQL
- */
-GRANT SELECT ON radcheck TO radius;
-GRANT SELECT ON radreply TO radius;
-GRANT SELECT ON radgroupcheck TO radius;
-GRANT SELECT ON radgroupreply TO radius;
-GRANT SELECT ON radusergroup TO radius;
-
-/*
- * The server can write to the accounting and post-auth logging table.
- */
-GRANT SELECT, INSERT, UPDATE on radacct TO radius;
-GRANT SELECT, INSERT, UPDATE on radpostauth TO radius;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf
deleted file mode 100644
index c91f543..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf
+++ /dev/null
@@ -1,397 +0,0 @@
-# -*- text -*-
-#
-#  main/sqlite/queries.conf -- SQLite configuration for default schema (schema.sql)
-#
-#  Id: e1e83bf94814ed8be6239977b7bacfed21c0cd6a $
-
-# Safe characters list for sql queries. Everything else is replaced
-# with their mime-encoded equivalents.
-# The default list should be ok
-#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
-
-#######################################################################
-#  Query config:  Username
-#######################################################################
-# This is the username that will get substituted, escaped, and added
-# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used below
-# everywhere a username substitution is needed so you you can be sure
-# the username passed from the client is escaped properly.
-#
-# Uncomment the next line, if you want the sql_user_name to mean:
-#
-#	Use Stripped-User-Name, if it's there.
-#	Else use User-Name, if it's there,
-#	Else use hard-coded string "DEFAULT" as the user name.
-#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
-#
-sql_user_name = "%{User-Name}"
-
-#######################################################################
-# Default profile
-#######################################################################
-# This is the default profile. It is found in SQL by group membership.
-# That means that this profile must be a member of at least one group
-# which will contain the corresponding check and reply items.
-# This profile will be queried in the authorize section for every user.
-# The point is to assign all users a default profile without having to
-# manually add each one to a group that will contain the profile.
-# The SQL module will also honor the User-Profile attribute. This
-# attribute can be set anywhere in the authorize section (ie the users
-# file). It is found exactly as the default profile is found.
-# If it is set then it will *overwrite* the default profile setting.
-# The idea is to select profiles based on checks on the incoming packets,
-# not on user group membership. For example:
-# -- users file --
-# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
-# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
-#
-# By default the default_user_profile is not set
-#
-#default_user_profile = "DEFAULT"
-
-#######################################################################
-# NAS Query
-#######################################################################
-# This query retrieves the radius clients
-#
-# 0. Row ID (currently unused)
-# 1. Name (or IP address)
-# 2. Shortname
-# 3. Type
-# 4. Secret
-# 5. Server
-#######################################################################
-
-client_query = "\
-	SELECT id, nasname, shortname, type, secret, server \
-	FROM ${client_table}"
-
-#######################################################################
-# Authorization Queries
-#######################################################################
-# These queries compare the check items for the user
-# in ${authcheck_table} and setup the reply items in
-# ${authreply_table}. You can use any query/tables
-# you want, but the return data for each row MUST
-# be in the following order:
-#
-# 0. Row ID (currently unused)
-# 1. UserName/GroupName
-# 2. Item Attr Name
-# 3. Item Attr Value
-# 4. Item Attr Operation
-#######################################################################
-
-#
-#  Use these for case sensitive usernames.
-#
-#authorize_check_query = "\
-#	SELECT id, username, attribute, value, op \
-#	FROM ${authcheck_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY id"
-
-#authorize_reply_query = "\
-#	SELECT id, username, attribute, value, op \
-#	FROM ${authreply_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY id"
-
-#
-#  The default queries are case insensitive. (for compatibility with older versions of FreeRADIUS)
-#
-authorize_check_query = "\
-	SELECT id, username, attribute, value, op \
-	FROM ${authcheck_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-authorize_reply_query = "\
-	SELECT id, username, attribute, value, op \
-	FROM ${authreply_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY id"
-
-#
-# Use these for case sensitive usernames.
-#
-#group_membership_query = "\
-#	SELECT groupname \
-#	FROM ${usergroup_table} \
-#	WHERE username = BINARY '%{SQL-User-Name}' \
-#	ORDER BY priority"
-
-group_membership_query = "\
-	SELECT groupname \
-	FROM ${usergroup_table} \
-	WHERE username = '%{SQL-User-Name}' \
-	ORDER BY priority"
-
-authorize_group_check_query = "\
-	SELECT id, groupname, attribute, \
-	Value, op \
-	FROM ${groupcheck_table} \
-	WHERE groupname = '%{Sql-Group}' \
-	ORDER BY id"
-
-authorize_group_reply_query = "\
-	SELECT id, groupname, attribute, \
-	value, op \
-	FROM ${groupreply_table} \
-	WHERE groupname = '%{Sql-Group}' \
-	ORDER BY id"
-
-#######################################################################
-# Simultaneous Use Checking Queries
-#######################################################################
-# simul_count_query	- query for the number of current connections
-#			- If this is not defined, no simultaneouls use checking
-#			- will be performed by this module instance
-# simul_verify_query	- query to return details of current connections
-#				for verification
-#			- Leave blank or commented out to disable verification step
-#			- Note that the returned field order should not be changed.
-#######################################################################
-
-#
-#  Uncomment simul_count_query to enable simultaneous use checking
-#
-#simul_count_query = "\
-#	SELECT COUNT(*) \
-#	FROM ${acct_table1} \
-#	WHERE username = '%{SQL-User-Name}' \
-#	AND acctstoptime IS NULL"
-
-simul_verify_query = "\
-	SELECT radacctid, acctsessionid, username, nasipaddress, nasportid, framedipaddress, \
-		callingstationid, framedprotocol \
-	FROM ${acct_table1} \
-	WHERE username = '%{SQL-User-Name}' \
-	AND acctstoptime IS NULL"
-
-#######################################################################
-# Accounting and Post-Auth Queries
-#######################################################################
-# These queries insert/update accounting and authentication records.
-# The query to use is determined by the value of 'reference'.
-# This value is used as a configuration path and should resolve to one
-# or more 'query's. If reference points to multiple queries, and a query
-# fails, the next query is executed.
-#
-# Behaviour is identical to the old 1.x/2.x module, except we can now
-# fail between N queries, and query selection can be based on any
-# combination of attributes, or custom 'Acct-Status-Type' values.
-#######################################################################
-accounting {
-	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
-
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/accounting.sql
-
-	column_list = "\
-		acctsessionid,		acctuniqueid,		username, \
-		realm,			nasipaddress,		nasportid, \
-		nasporttype,		acctstarttime,		acctupdatetime, \
-		acctstoptime,		acctsessiontime, 	acctauthentic, \
-		connectinfo_start,	connectinfo_stop, 	acctinputoctets, \
-		acctoutputoctets,	calledstationid, 	callingstationid, \
-		acctterminatecause,	servicetype,		framedprotocol, \
-		framedipaddress"
-
-	type {
-		accounting-on {
-			#
-			#  Bulk terminate all sessions associated with a given NAS
-			#
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					acctstoptime = %{%{integer:Event-Timestamp}:-date('now')}, \
-					acctsessiontime	= \
-						%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} \
-						- strftime('%s', acctstarttime)), \
-					acctterminatecause = '%{Acct-Terminate-Cause}' \
-				WHERE acctstoptime IS NULL \
-				AND nasipaddress   = '%{NAS-IP-Address}' \
-				AND acctstarttime <= %{integer:Event-Timestamp}"
-		}
-
-		accounting-off {
-			query = "${..accounting-on.query}"
-		}
-
-		start {
-			#
-			#  Insert a new record into the sessions table
-			#
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					%{%{integer:Event-Timestamp}:-date('now')}, \
-					%{%{integer:Event-Timestamp}:-date('now')}, \
-					NULL, \
-					'0', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					'0', \
-					'0', \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-
-			#
-			#  Key constraints prevented us from inserting a new session,
-			#  use the alternate query to update an existing session.
-			#
-			query = "\
-				UPDATE ${....acct_table1} SET \
-					acctstarttime	= %{%{integer:Event-Timestamp}:-date('now')}, \
-					acctupdatetime	= %{%{integer:Event-Timestamp}:-date('now'))}, \
-					connectinfo_start = '%{Connect-Info}' \
-				WHERE acctsessionid = '%{Acct-Session-Id}' \
-				AND username		= '%{SQL-User-Name}' \
-				AND nasipaddress	= '%{NAS-IP-Address}'"
-		}
-
-		interim-update {
-			#
-			#  Update an existing session and calculate the interval
-			#  between the last data we received for the session and this
-			#  update. This can be used to find stale sessions.
-			#
-			query = "\
-				UPDATE ${....acct_table1} \
-				SET \
-					acctupdatetime  = %{%{integer:Event-Timestamp}:-date('now')}, \
-					acctinterval    = 0, \
-					framedipaddress = '%{Framed-IP-Address}', \
-					acctsessiontime = '%{Acct-Session-Time}', \
-					acctinputoctets = %{%{Acct-Input-Gigawords}:-0} \
-						<< 32 | %{%{Acct-Input-Octets}:-0}, \
-					acctoutputoctets = %{%{Acct-Output-Gigawords}:-0} \
-						<< 32 | %{%{Acct-Output-Octets}:-0} \
-				WHERE acctsessionid     = '%{Acct-Session-Id}' \
-				AND username            = '%{SQL-User-Name}' \
-				AND nasipaddress        = '%{NAS-IP-Address}'"
-
-			#
-			#  The update condition matched no existing sessions. Use
-			#  the values provided in the update to create a new session.
-			#
-			query = "\
-				INSERT INTO ${....acct_table1} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					(%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} - %{%{Acct-Session-Time}:-0}), \
-					%{%{integer:Event-Timestamp}:-date('now')}, \
-					NULL, \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'%{Connect-Info}', \
-					'', \
-					%{%{Acct-Input-Gigawords}:-0} << 32 | \
-						%{%{Acct-Input-Octets}:-0}, \
-					%{%{Acct-Output-Gigawords}:-0} << 32 | \
-						%{%{Acct-Output-Octets}:-0}, \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-		}
-
-		stop {
-			#
-			#  Session has terminated, update the stop time and statistics.
-			#
-			query = "\
-				UPDATE ${....acct_table2} SET \
-					acctstoptime	= %{%{integer:Event-Timestamp}:-date('now')}, \
-					acctsessiontime	= '%{Acct-Session-Time}', \
-					acctinputoctets	= %{%{Acct-Input-Gigawords}:-0} \
-						<< 32 | %{%{Acct-Input-Octets}:-0}, \
-					acctoutputoctets = %{%{Acct-Output-Gigawords}:-0} \
-						<< 32 | %{%{Acct-Output-Octets}:-0}, \
-					acctterminatecause = '%{Acct-Terminate-Cause}', \
-					connectinfo_stop = '%{Connect-Info}' \
-				WHERE acctsessionid 	= '%{Acct-Session-Id}' \
-				AND username		= '%{SQL-User-Name}' \
-				AND nasipaddress	= '%{NAS-IP-Address}'"
-
-			#
-			#  The update condition matched no existing sessions. Use
-			#  the values provided in the update to create a new session.
-			#
-			query = "\
-				INSERT INTO ${....acct_table2} \
-					(${...column_list}) \
-				VALUES \
-					('%{Acct-Session-Id}', \
-					'%{Acct-Unique-Session-Id}', \
-					'%{SQL-User-Name}', \
-					'%{Realm}', \
-					'%{NAS-IP-Address}', \
-					'%{NAS-Port}', \
-					'%{NAS-Port-Type}', \
-					%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} - %{%{Acct-Session-Time}:-0}), \
-					%{%{integer:Event-Timestamp}:-date('now')}, \
-					%{%{integer:Event-Timestamp}:-date('now')}, \
-					'%{Acct-Session-Time}', \
-					'%{Acct-Authentic}', \
-					'', \
-					'%{Connect-Info}', \
-					%{%{Acct-Input-Gigawords}:-0} << 32 | \
-						%{%{Acct-Input-Octets}:-0}, \
-					%{%{Acct-Output-Gigawords}:-0} << 32 | \
-						%{%{Acct-Output-Octets}:-0}, \
-					'%{Called-Station-Id}', \
-					'%{Calling-Station-Id}', \
-					'%{Acct-Terminate-Cause}', \
-					'%{Service-Type}', \
-					'%{Framed-Protocol}', \
-					'%{Framed-IP-Address}')"
-		}
-	}
-}
-
-#######################################################################
-# Authentication Logging Queries
-#######################################################################
-# postauth_query	- Insert some info after authentication
-#######################################################################
-
-post-auth {
-	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
-	# when used with the rlm_sql_null driver.
-#	logfile = ${logdir}/post-auth.sql
-
-	query =	"\
-		INSERT INTO ${..postauth_table} \
-			(username, pass, reply, authdate) \
-		VALUES ( \
-			'%{SQL-User-Name}', \
-			'%{%{User-Password}:-%{Chap-Password}}', \
-			'%{reply:Packet-Type}', \
-			 %{%{integer:Event-Timestamp}:-date('now')})"
-}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql
deleted file mode 100644
index a531ad5..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql
+++ /dev/null
@@ -1,153 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
------------------------------------------------------------------------------
--- $Id: 83a455e620e5ac9603c659697ce9c756c9ccddb1 $                 	   --
---                                                                         --
---  schema.sql                       rlm_sql - FreeRADIUS SQLite Module    --
---                                                                         --
---     Database schema for SQLite rlm_sql module                           --
---                                                                         --
---     To load:                                                            --
---         mysql -uroot -prootpass radius < schema.sql                     --
---                                                                         --
------------------------------------------------------------------------------
-
---
--- Table structure for table 'radacct'
---
-CREATE TABLE radacct (
-  radacctid bigint(21) PRIMARY KEY,
-  acctsessionid varchar(64) NOT NULL default '',
-  acctuniqueid varchar(32) NOT NULL default '',
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  realm varchar(64) default '',
-  nasipaddress varchar(15) NOT NULL default '',
-  nasportid varchar(15) default NULL,
-  nasporttype varchar(32) default NULL,
-  acctstarttime datetime NULL default NULL,
-  acctupdatetime datetime NULL default NULL,
-  acctstoptime datetime NULL default NULL,
-  acctinterval int(12) default NULL,
-  acctsessiontime int(12) default NULL,
-  acctauthentic varchar(32) default NULL,
-  connectinfo_start varchar(50) default NULL,
-  connectinfo_stop varchar(50) default NULL,
-  acctinputoctets bigint(20) default NULL,
-  acctoutputoctets bigint(20) default NULL,
-  calledstationid varchar(50) NOT NULL default '',
-  callingstationid varchar(50) NOT NULL default '',
-  acctterminatecause varchar(32) NOT NULL default '',
-  servicetype varchar(32) default NULL,
-  framedprotocol varchar(32) default NULL,
-  framedipaddress varchar(15) NOT NULL default ''
-);
-
-CREATE UNIQUE INDEX acctuniqueid ON radacct(acctuniqueid);
-CREATE INDEX username ON radacct(username);
-CREATE INDEX framedipaddress ON radacct (framedipaddress);
-CREATE INDEX acctsessionid ON radacct(acctsessionid);
-CREATE INDEX acctsessiontime ON radacct(acctsessiontime);
-CREATE INDEX acctstarttime ON radacct(acctstarttime);
-CREATE INDEX acctinterval ON radacct(acctinterval);
-CREATE INDEX acctstoptime ON radacct(acctstoptime);
-CREATE INDEX nasipaddress ON radacct(nasipaddress);
-
---
--- Table structure for table 'radcheck'
---
-CREATE TABLE radcheck (
-  id int(11) PRIMARY KEY,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253) NOT NULL default ''
-);
-CREATE INDEX check_username ON radcheck(username);
-
---
--- Table structure for table 'radgroupcheck'
---
-CREATE TABLE radgroupcheck (
-  id int(11) PRIMARY KEY,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '==',
-  value varchar(253)  NOT NULL default ''
-);
-CREATE INDEX check_groupname ON radgroupcheck(groupname);
-
---
--- Table structure for table 'radgroupreply'
---
-CREATE TABLE radgroupreply (
-  id int(11) PRIMARY KEY,
-  groupname varchar(64) NOT NULL default '',
-  attribute varchar(64)  NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253)  NOT NULL default ''
-);
-CREATE INDEX reply_groupname ON radgroupreply(groupname);
-
---
--- Table structure for table 'radreply'
---
-CREATE TABLE radreply (
-  id int(11) PRIMARY KEY,
-  username varchar(64) NOT NULL default '',
-  attribute varchar(64) NOT NULL default '',
-  op char(2) NOT NULL DEFAULT '=',
-  value varchar(253) NOT NULL default ''
-);
-CREATE INDEX reply_username ON radreply(username);
-
---
--- Table structure for table 'radusergroup'
---
-CREATE TABLE radusergroup (
-  username varchar(64) NOT NULL default '',
-  groupname varchar(64) NOT NULL default '',
-  priority int(11) NOT NULL default '1'
-);
-CREATE INDEX usergroup_username ON radusergroup(username);
-
---
--- Table structure for table 'radpostauth'
---
-CREATE TABLE radpostauth (
-  id int(11) PRIMARY KEY,
-  username varchar(64) NOT NULL default '',
-  pass varchar(64) NOT NULL default '',
-  reply varchar(32) NOT NULL default '',
-  authdate timestamp NOT NULL
-);
-
---
--- Table structure for table 'nas'
---
-CREATE TABLE nas (
-  id int(11) PRIMARY KEY,
-  nasname varchar(128) NOT NULL,
-  shortname varchar(32),
-  type varchar(30) DEFAULT 'other',
-  ports int(5),
-  secret varchar(60) DEFAULT 'secret' NOT NULL,
-  server varchar(64),
-  community varchar(50),
-  description varchar(200) DEFAULT 'RADIUS Client'
-);
-CREATE INDEX nasname ON nas(nasname);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf b/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf
deleted file mode 100644
index 9aac368..0000000
--- a/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-server:
- num-threads: 2
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/always b/src/test/setup/radius-config/freeradius/mods-enabled/always
deleted file mode 120000
index 2cc1029..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/always
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/always
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter b/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter
deleted file mode 120000
index 400dfd1..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/attr_filter
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap b/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap
deleted file mode 120000
index 22cfe44..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/cache_eap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/chap b/src/test/setup/radius-config/freeradius/mods-enabled/chap
deleted file mode 120000
index 6ccd392..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/chap
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/chap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/detail b/src/test/setup/radius-config/freeradius/mods-enabled/detail
deleted file mode 120000
index ad00d0e..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/detail
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/detail
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/dhcp b/src/test/setup/radius-config/freeradius/mods-enabled/dhcp
deleted file mode 120000
index 7b16f23..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/dhcp
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/dhcp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/digest b/src/test/setup/radius-config/freeradius/mods-enabled/digest
deleted file mode 120000
index 95d3d36..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/digest
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/digest
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients b/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients
deleted file mode 120000
index 7b030ba..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/dynamic_clients
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/eap b/src/test/setup/radius-config/freeradius/mods-enabled/eap
deleted file mode 120000
index 37bab92..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/eap
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/eap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/echo b/src/test/setup/radius-config/freeradius/mods-enabled/echo
deleted file mode 120000
index a436e68..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/echo
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/echo
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/exec b/src/test/setup/radius-config/freeradius/mods-enabled/exec
deleted file mode 120000
index a42a481..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/exec
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/exec
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/expiration b/src/test/setup/radius-config/freeradius/mods-enabled/expiration
deleted file mode 120000
index 340f641..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/expiration
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/expiration
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/expr b/src/test/setup/radius-config/freeradius/mods-enabled/expr
deleted file mode 120000
index 64dd3ab..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/expr
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/expr
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/files b/src/test/setup/radius-config/freeradius/mods-enabled/files
deleted file mode 120000
index 372bc86..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/files
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/files
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/linelog b/src/test/setup/radius-config/freeradius/mods-enabled/linelog
deleted file mode 120000
index d6acab4..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/linelog
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/linelog
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/logintime b/src/test/setup/radius-config/freeradius/mods-enabled/logintime
deleted file mode 120000
index 99b698e..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/logintime
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/logintime
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/mschap b/src/test/setup/radius-config/freeradius/mods-enabled/mschap
deleted file mode 120000
index c7523de..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/mschap
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/mschap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth b/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth
deleted file mode 120000
index 3d68f67..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/ntlm_auth
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/pap b/src/test/setup/radius-config/freeradius/mods-enabled/pap
deleted file mode 120000
index 07f986f..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/pap
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/pap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/passwd b/src/test/setup/radius-config/freeradius/mods-enabled/passwd
deleted file mode 120000
index be64f8b..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/passwd
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/passwd
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/preprocess b/src/test/setup/radius-config/freeradius/mods-enabled/preprocess
deleted file mode 120000
index 266822a..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/preprocess
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/preprocess
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/radutmp b/src/test/setup/radius-config/freeradius/mods-enabled/radutmp
deleted file mode 120000
index e3c390c..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/radutmp
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/radutmp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/realm b/src/test/setup/radius-config/freeradius/mods-enabled/realm
deleted file mode 120000
index acc66be..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/realm
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/realm
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/replicate b/src/test/setup/radius-config/freeradius/mods-enabled/replicate
deleted file mode 120000
index b03d8de..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/replicate
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/replicate
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/soh b/src/test/setup/radius-config/freeradius/mods-enabled/soh
deleted file mode 120000
index af88216..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/soh
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/soh
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/sql b/src/test/setup/radius-config/freeradius/mods-enabled/sql
deleted file mode 120000
index 5c9aa11..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/sql
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/sql
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp b/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp
deleted file mode 120000
index ac90674..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/sradutmp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/unix b/src/test/setup/radius-config/freeradius/mods-enabled/unix
deleted file mode 120000
index 599fdef..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/unix
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/unix
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/unpack b/src/test/setup/radius-config/freeradius/mods-enabled/unpack
deleted file mode 120000
index dad4563..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/unpack
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/unpack
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/utf8 b/src/test/setup/radius-config/freeradius/mods-enabled/utf8
deleted file mode 120000
index 7979255..0000000
--- a/src/test/setup/radius-config/freeradius/mods-enabled/utf8
+++ /dev/null
@@ -1 +0,0 @@
-../mods-available/utf8
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/panic.gdb b/src/test/setup/radius-config/freeradius/panic.gdb
deleted file mode 100644
index 3ae253a..0000000
--- a/src/test/setup/radius-config/freeradius/panic.gdb
+++ /dev/null
@@ -1,4 +0,0 @@
-info locals
-info args
-thread apply all bt full
-quit
diff --git a/src/test/setup/radius-config/freeradius/policy.d/accounting b/src/test/setup/radius-config/freeradius/policy.d/accounting
deleted file mode 100644
index 201f5e5..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/accounting
+++ /dev/null
@@ -1,72 +0,0 @@
-# We check for this prefix to determine whether the class
-# value was generated by this server.  It should be changed
-# so that it is globally unique.
-class_value_prefix = 'ai:'
-
-#
-#	Replacement for the old rlm_acct_unique module
-#
-acct_unique {
-	#
-	#  If we have a class attribute in the format
-	#  'auth_id:[0-9a-f]{32}' it'll have a local value
-	#  (defined by insert_acct_class), this ensures
-	#  uniqueness and suitability.
-	#
-	#  We could just use the Class attribute as
-	#  Acct-Unique-Session-Id, but this may cause problems
-	#  with NAS that carry Class values across between
-	#  multiple linked sessions.  So we rehash class with
-	#  Acct-Session-ID to provide a truely unique session
-	#  identifier.
-	#
-	#  Using a Class/Session-ID combination is more robust
-	#  than using elements in the Accounting-Request,
-	#  which may be subject to change, such as
-	#  NAS-IP-Address, Client-IP-Address and
-	#  NAS-Port-ID/NAS-Port.
-	#
-	#  This policy should ensure that session data is not
-	#  affected if NAS IP addresses change, or the client
-	#  roams to a different 'port' whilst maintaining its
-	#  initial authentication session (Common in a
-	#  wireless environment).
-	#
-	if("%{string:Class}" =~ /${policy.class_value_prefix}([0-9a-f]{32})/i) {
-		update request {
-			Acct-Unique-Session-Id := "%{md5:%{1},%{Acct-Session-ID}}"
-		}
-	}
-
-	#
-	#  Not All devices respect RFC 2865 when dealing with
-	#  the class attribute, so be prepared to use the
-	#  older style of hashing scheme if a class attribute
-	#  is not included
-	#
-	else {
-		update request {
-			Acct-Unique-Session-Id := "%{md5:%{User-Name},%{Acct-Session-ID},%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}},%{NAS-Identifier},%{NAS-Port-ID},%{NAS-Port}}"
-		 }
-	}
-}
-
-#
-#	Insert a (hopefully unique) value into class
-#
-insert_acct_class {
-	update reply {
-		Class = "${policy.class_value_prefix}%{md5:%t,%I,%{Packet-Src-Port},%{Packet-Src-IP-Address},%{NAS-IP-Address},%{Calling-Station-ID},%{User-Name}}"
-	}
-}
-
-#
-#	Merges Acct-[Input|Output]-Octets and Acct-[Input|Output]-Gigawords into Acct-[Input|Output]-Octets64
-#
-acct_counters64.preacct {
-	update request {
-		Acct-Input-Octets64 = "%{expr:(%{%{Acct-Input-Gigawords}:-0} * 4294967296) + %{%{Acct-Input-Octets}:-0}}"
-		Acct-Output-Octets64 = "%{expr:(%{%{Acct-Output-Gigawords}:-0} * 4294967296) + %{%{Acct-Output-Octets}:-0}}"
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/policy.d/canonicalization b/src/test/setup/radius-config/freeradius/policy.d/canonicalization
deleted file mode 100644
index c1cb357..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/canonicalization
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-#	Split User-Name in NAI format (RFC 4282) into components
-#
-#  This policy writes the Username and Domain portions of the
-#  NAI into the Stripped-User-Name and Stripped-User-Domain
-#  attributes.
-#
-#  The regular expression to do this is not strictly compliant
-#  with the standard, but it is not possible to write a
-#  compliant regexp without perl style regular expressions (or
-#  at least not a legible one).
-#
-nai_regexp = "^([^@]*)(@([-[:alnum:]]+\\.[-[:alnum:].]+))?$"
-
-split_username_nai {
-	if(User-Name =~ /${policy.nai_regexp}/){
-		update request {
-			Stripped-User-Name := "%{1}"
-			Stripped-User-Domain = "%{3}"
-		}
-
-		# If any of the expansions result in a null
-		# string, the update section may return
-		# something other than updated...
-		updated
-	}
-	else {
-		noop
-	}
-}
-
-#
-#  If called in post-proxy we modify the proxy-reply message
-#
-split_username_nai.post-proxy {
-	if(proxy-reply:User-Name =~ /${policy.nai_regexp}/){
-		update proxy-reply {
-			Stripped-User-Name := "%{1}"
-			Stripped-User-Domain = "%{3}"
-		}
-		updated
-	}
-	else {
-		noop
-	}
-}
-
-#
-#  Normalize the MAC Addresses in the Calling/Called-Station-Id
-#
-mac-addr-regexp = ([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})
-
-#
-#  Add "rewrite_called_station_id" in the "authorize" and
-#  "preacct" sections.
-#
-rewrite_called_station_id {
-	if(Called-Station-Id =~ /^${policy.mac-addr-regexp}(:(.+))?$/i) {
-		update request {
-			Called-Station-Id := "%{tolower:%{1}-%{2}-%{3}-%{4}-%{5}-%{6}}"
-		}
-
-		# SSID component?
-		if ("%{8}") {
-			update request {
-				Called-Station-SSID := "%{8}"
-			}
-		}
-		updated
-	}
-	else {
-		noop
-	}
-}
-
-#
-#  Add "rewrite_calling_station_id" in the "authorize" and
-#  "preacct" sections.
-#
-rewrite_calling_station_id {
-	if(Calling-Station-Id =~ /^${policy.mac-addr-regexp}$/i) {
-		update request {
-			Calling-Station-Id := "%{tolower:%{1}-%{2}-%{3}-%{4}-%{5}-%{6}}"
-		}
-		updated
-	}
-	else {
-		noop
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/policy.d/control b/src/test/setup/radius-config/freeradius/policy.d/control
deleted file mode 100644
index b056ed1..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/control
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#  If you want the server to pretend that it is dead,
-#  then use the "do_not_respond" policy.
-#
-do_not_respond {
-	update control {
-		Response-Packet-Type := Do-Not-Respond
-	}
-
-	handled
-}
-
-#
-#  Have the server accept the current request.
-#  Can only be called from authorize.
-#  Unlike calling the always module instance 'reject' the request will continue to be processed.
-#
-accept.authorize {
-	update control {
-		Auth-Type := accept
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/policy.d/cui b/src/test/setup/radius-config/freeradius/policy.d/cui
deleted file mode 100644
index f0302b8..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/cui
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-#  The following policies are for the Chargeable-User-Identity
-#  (CUI) configuration.
-#
-#  The policies below can be called as just 'cui' (not
-#  cui.authorize etc..)  from the various config sections.
-#
-
-#
-#  cui_hash_key definition
-#  This key serves the purpose of protecting CUI values against
-#  dictionary attacks, therefore should be chosen as a "random"
-#  string and kept secret.
-#
-cui_hash_key = "changeme"
-
-#
-# cui_require_operator_name switch
-# If this is set to nonzero value then CUI will only be added
-# when a non-empty Operator-Name value is present in the request
-#
-cui_require_operator_name = "no"
-
-#
-#  The client indicates it can do CUI by sending a CUI attribute
-#  containing one zero byte.
-#  A non-empty value in Operator-Name can be an additional requirement.
-#  Normally CUI support is turned on only for such requests.
-#  CUI support can be used for local clients which do not
-#  supports CUI themselves, the server can simulate a CUI request
-#  adding the missing NUL CUI value and the Operator-Name attribute.
-#  Clients which are supposed to get this treatment should
-#  be marked by add_cui flag in clients.conf
-#  We assume that local clients are marked in the client.conf with
-#  add_cui flag, e.g.
-#  client xxxx {
-#    ...
-#    add_cui = yes
-#  }
-#
-cui.authorize {
-	if ("%{client:add_cui}" == 'yes') {
-		update request {
-			Chargeable-User-Identity := '\\000'
-		}
-	}
-}
-
-#
-#  Before proxing an Access-Request to a remote server, a NUL CUI
-#  attribute should be added, unless it is already present in the request.
-#
-cui.pre-proxy {
-	if (("%{request:Packet-Type}" == 'Access-Request') && ("%{client:add_cui}" == 'yes')) {
-		update proxy-request {
-			Chargeable-User-Identity = '\\000'
-		}
-	}
-}
-
-
-#
-#  Add a CUI attribute based on the User-Name, and a secret key
-#  known only to this server.
-#  For EAP-TTLS and EAP-PEAP methods
-#  use_tunneled_reply parameter MUST be set to yes
-#
-cui.post-auth {
-	if (!control:Proxy-To-Realm && Chargeable-User-Identity && !reply:Chargeable-User-Identity && \
-	    (Operator-Name || ('${policy.cui_require_operator_name}' != 'yes')) ) {
-		update reply {
-			Chargeable-User-Identity = "%{sha1:${policy.cui_hash_key}%{tolower:%{User-Name}%{%{Operator-Name}:-}}}"
-		}
-	}
-
-	update reply {
-		User-Name !* ANY	# remove User-Name from the reply for security
-	}
-
-	#
-	#  The section below will store a CUI for the User in the DB.
-	#  You need to configure the cuisql module and your database for this to work.
-	#  If your NAS can do CUI based accounting themselves or you do not care about
-	#  accounting, comment out the three lines below.
-	#
-	if (reply:Chargeable-User-Identity) {
-		cuisql
-	}
-}
-
-
-cui-inner.post-auth {
-	if (outer.request:Chargeable-User-Identity && \
-	    (outer.request:Operator-Name || ('${policy.cui_require_operator_name}' != 'yes'))) {
-		update reply {
-			Chargeable-User-Identity := "%{sha1:${policy.cui_hash_key}%{tolower:%{User-Name}%{%{outer.request:Operator-Name}:-}}}"
-		}
-	}
-}
-
-#
-#  If your NAS can do CUI based accounting or you do not care about
-#  accounting then just comment out the call to cui in ......
-#
-#  If we had stored a CUI for the User, add it to the request.
-#
-cui.accounting {
-	#
-	#  If the CUI isn't in the packet, see if we can find it
-	#  in the DB.
-	#
-	if (!Chargeable-User-Identity) {
-		update request {
-			Chargeable-User-Identity := "%{cuisql:\
-				SELECT cui FROM cui \
-				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
-				AND callingstationid = '%{Calling-Station-Id}' \
-				AND username = '%{User-Name}'}"
-		}
-	}
-
-	#
-	#  If it exists now, then write out when we last saw
-	#  this CUI.
-	#
-	if (Chargeable-User-Identity && (Chargeable-User-Identity != '')) {
-		cuisql
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/policy.d/dhcp b/src/test/setup/radius-config/freeradius/policy.d/dhcp
deleted file mode 100644
index 4396f06..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/dhcp
+++ /dev/null
@@ -1,25 +0,0 @@
-#  Assign compatibility data to request for sqlippool
-dhcp_sqlippool.post-auth {
-
-
-	#  Do some minor hacks to the request so that it looks
-	#  like a RADIUS request to the SQL IP Pool module.
-	update request {
-		User-Name = "DHCP-%{DHCP-Client-Hardware-Address}"
-		Calling-Station-Id = "%{DHCP-Client-Hardware-Address}"
-		NAS-IP-Address = "%{%{DHCP-Gateway-IP-Address}:-127.0.0.1}"
-		Acct-Status-Type = Start
-	}
-
-	#  Call the actual module
-	dhcp_sqlippool
-
-	#  Convert Framed-IP-Address to DHCP, but only if we
-	#  actually allocated an address.
-	if (ok) {
-		update reply {
-			DHCP-Your-IP-Address = "%{reply:Framed-IP-Address}"
-		}
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/policy.d/eap b/src/test/setup/radius-config/freeradius/policy.d/eap
deleted file mode 100644
index 46d6945..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/eap
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-#	Response caching to handle proxy failovers
-#
-Xeap.authorize {
-	cache_eap
-	if (ok) {
-		#
-		#	Expire previous cache entry
-		#
-		if (control:State) {
-			update control {
-				Cache-TTL := 0
-			}
-			cache_eap
-
-			update control {
-				Cache-TTL !* ANY
-				State !* ANY
-			}
-		}
-
-		handled
-	}
-	else {
-		eap.authorize
-	}
-}
-
-#
-#	Populate cache with responses from the EAP module
-#
-Xeap.authenticate {
-	eap {
-		handled = 1
-	}
-	if (handled) {
-		cache_eap.authorize
-
-		handled
-	}
-
-	cache_eap.authorize
-}
-
-#
-#       Forbid all EAP types.  Enable this by putting "forbid_eap"
-#       into the "authorize" section.
-#
-forbid_eap {
-	if (EAP-Message) {
-		reject
-	}
-}
-
-#
-#       Forbid all non-EAP types outside of an EAP tunnel.
-#
-permit_only_eap {
-	if (!EAP-Message) {
-		#  We MAY be inside of a TTLS tunnel.
-		#  PEAP and EAP-FAST require EAP inside of
-		#  the tunnel, so this check is OK.
-		#  If so, then there MUST be an outer EAP message.
-		if (outer.request && outer.request:EAP-Message) {
-			reject
-		}
-	}
-}
-
-#
-#       Remove Reply-Message from response if were doing EAP
-#
-#  Be RFC 3579 2.6.5 compliant - EAP-Message and Reply-Message should
-#  not be present in the same response.
-#
-remove_reply_message_if_eap {
-	if(reply:EAP-Message && reply:Reply-Message) {
-		update reply {
-			Reply-Message !* ANY
-		}
-	}
-	else {
-		noop
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/policy.d/filter b/src/test/setup/radius-config/freeradius/policy.d/filter
deleted file mode 100644
index c881b3d..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/filter
+++ /dev/null
@@ -1,93 +0,0 @@
-#
-#	Example of forbidding all attempts to login via
-#	realms.
-#
-deny_realms {
-	if (User-Name =~ /@|\\/) {
-		reject
-	}
-}
-
-#
-#	Filter the username
-#
-#  Force some sanity on User-Name. This helps to avoid issues
-#  issues where the back-end database is "forgiving" about
-#  what constitutes a user name.
-#
-filter_username {
-	#
-	#  reject mixed case
-	#  e.g. "UseRNaMe"
-	#
-	if (User-Name != "%{tolower:%{User-Name}}") {
-		reject
-	}
-
-	#
-	#  reject all whitespace
-	#  e.g. "user@ site.com", or "us er", or " user", or "user "
-	#
-	if (User-Name =~ / /) {
-		update reply {
-			Reply-Message += "Rejected: Username contains whitespace"
-		}
-		reject
-	}
-
-	#
-	#  reject Multiple @'s
-	#  e.g. "user@site.com@site.com"
-	#
-	if(User-Name =~ /@.*@/ ) {
-		update reply {
-			Reply-Message += "Rejected: Multiple @ in username"
-		}
-		reject
-	}
-
-	#
-	#  reject double dots
-	#  e.g. "user@site..com"
-	#
-	if (User-Name =~ /\\.\\./ ) {
-		update reply {
-			Reply-Message += "Rejected: Username contains ..s"
-		}
-		reject
-	}
-
-	#
-	#  must have at least 1 string-dot-string after @
-	#  e.g. "user@site.com"
-	#
-	if ((User-Name =~ /@/) && (User-Name !~ /@(.+)\\.(.+)$/))  {
-		update reply {
-			Reply-Message += "Rejected: Realm does not have at least one dot separator"
-		}
-		reject
-	}
-
-	#
-	#  Realm ends with a dot
-	#  e.g. "user@site.com."
-	#
-	if (User-Name =~ /\\.$/)  {
-		update reply {
-			Reply-Message += "Rejected: Realm ends with a dot"
-		}
-		reject
-	}
-
-	#
-	#  Realm begins with a dot
-	#  e.g. "user@.site.com"
-	#
-	if (User-Name =~ /@\\./)  {
-		update reply {
-			Reply-Message += "Rejected: Realm begins with a dot"
-		}
-		reject
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/policy.d/operator-name b/src/test/setup/radius-config/freeradius/policy.d/operator-name
deleted file mode 100644
index a16fa1e..0000000
--- a/src/test/setup/radius-config/freeradius/policy.d/operator-name
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#  The following policies are for the Operator-Name
-#  configuration.
-#
-#  The policies below can be called as just 'operator-name' (not
-#  operator-name.authorize etc..)  from the various config sections.
-#
-
-#  If you require that the Operator-Name be set
-#  for local clients then call the 'operator-name' policy
-#  in the authorize section of the virtual-server for your clients in clients.conf
-
-#  To inject an Operator-Name whilst proxying, call the
-#  'operator-name' policy in the pre-proxy section of the virtual server
-#  No need to call this if you have already enabled this in
-#  the authorize section.
-
-#
-#  We assume that clients can have the operator-name definition
-#  in the client.conf, e.g.
-#  client xxxx {
-#    ...
-#    Operator-Name = 1your.domain
-#  }
-#  If this parameter is found for a client, then we add
-#  an Operator-Name attribute
-#
-operator-name.authorize {
-	if ("%{client:Operator-Name}") {
-		update request {
-			Operator-Name = "%{client:Operator-Name}"
-		}
-	}
-}
-
-#
-# Before proxing the client add an Operator-Name
-# attribute identifying this site if the operator-name is found for this client
-#
-operator-name.pre-proxy {
-	if (("%{request:Packet-Type}" == 'Access-Request') && "%{client:Operator-Name}") {
-		update proxy-request {
-			Operator-Name := "%{client:Operator-Name}"
-		}
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/proxy.conf b/src/test/setup/radius-config/freeradius/proxy.conf
deleted file mode 100644
index 0f61067..0000000
--- a/src/test/setup/radius-config/freeradius/proxy.conf
+++ /dev/null
@@ -1,804 +0,0 @@
-# -*- text -*-
-##
-## proxy.conf -- proxy radius and realm configuration directives
-##
-##	$Id: ae8fedf199ad3ec6197dee75db11769aafa88d07 $
-
-#######################################################################
-#
-#  Proxy server configuration
-#
-#  This entry controls the servers behaviour towards ALL other servers
-#  to which it sends proxy requests.
-#
-proxy server {
-	#
-	#  Note that as of 2.0, the "synchronous", "retry_delay",
-	#  "retry_count", and "dead_time" have all been deprecated.
-	#  For backwards compatibility, they are are still accepted
-	#  by the server, but they ONLY apply to the old-style realm
-	#  configuration.  i.e. realms with "authhost" and/or "accthost"
-	#  entries.
-	#
-	#  i.e. "retry_delay" and "retry_count" have been replaced
-	#  with per-home-server configuration.  See the "home_server"
-	#  example below for details.
-	#
-	#  i.e. "dead_time" has been replaced with a per-home-server
-	#  "revive_interval".  We strongly recommend that this not
-	#  be used, however.  The new method is much better.
-
-	#
-	#  In 2.0, the server is always "synchronous", and setting
-	#  "synchronous = no" is impossible.  This simplifies the
-	#  server and increases the stability of the network.
-	#  However, it means that the server (i.e. proxy) NEVER
-	#  originates packets.  It proxies packets ONLY when it receives
-	#  a packet or a re-transmission from the NAS.  If the NAS never
-	#  re-transmits, the proxy never re-transmits, either.  This can
-	#  affect fail-over, where a packet does *not* fail over to a
-	#  second home server.. because the NAS never retransmits the
-	#  packet.
-	#
-	#  If you need to set "synchronous = no", please send a
-	#  message to the list <freeradius-users@lists.freeradius.org>
-	#  explaining why this feature is vital for your network.
-
-	#
-	#  If a realm exists, but there are no live home servers for
-	#  it, we can fall back to using the "DEFAULT" realm.  This is
-	#  most useful for accounting, where the server can proxy
-	#  accounting requests to home servers, but if they're down,
-	#  use a DEFAULT realm that is LOCAL (i.e. accthost = LOCAL),
-	#  and then store the packets in the "detail" file.  That data
-	#  can be later proxied to the home servers by radrelay, when
-	#  those home servers come back up again.
-
-	#  Setting this to "yes" may have issues for authentication.
-	#  i.e. If you are proxying for two different ISP's, and then
-	#  act as a general dial-up for Gric.  If one of the first two
-	#  ISP's has their RADIUS server go down, you do NOT want to
-	#  proxy those requests to GRIC.  Instead, you probably want
-	#  to just drop the requests on the floor.  In that case, set
-	#  this value to 'no'.
-	#
-	#  allowed values: {yes, no}
-	#
-	default_fallback = no
-
-}
-
-#######################################################################
-#
-#  Configuration for the proxy realms.
-#
-#  As of 2.0. the old-style "realms" file is deprecated, and is not
-#  used by FreeRADIUS.
-#
-#  As of 2.0, the "realm" configuration has changed.  Instead of
-#  specifying "authhost" and "accthost" in a realm section, the home
-#  servers are specified separately in a "home_server" section.  For
-#  backwards compatibility, you can still use the "authhost" and
-#  "accthost" directives.  If you only have one home server for a
-#  realm, it is easier to use the old-style configuration.
-#
-#  However, if you have multiple servers for a realm, we STRONGLY
-#  suggest moving to the new-style configuration.
-#
-#
-#  Load-balancing and failover between home servers is handled via
-#  a "home_server_pool" section.
-#
-#  Finally, The "realm" section defines the realm, some options, and
-#  indicates which server pool should be used for the realm.
-#
-#  This change means that simple configurations now require multiple
-#  sections to define a realm.  However, complex configurations
-#  are much simpler than before, as multiple realms can share the same
-#  server pool.
-#
-#  That is, realms point to server pools, and server pools point to
-#  home servers.  Multiple realms can point to one server pool.  One
-#  server pool can point to multiple home servers.  Each home server
-#  can appear in one or more pools.
-#
-
-######################################################################
-#
-#  This section defines a "Home Server" which is another RADIUS
-#  server that gets sent proxied requests.  In earlier versions
-#  of FreeRADIUS, home servers were defined in "realm" sections,
-#  which was awkward.  In 2.0, they have been made independent
-#  from realms, which is better for a number of reasons.
-#
-home_server localhost {
-	#
-	#  Home servers can be sent Access-Request packets
-	#  or Accounting-Request packets.
-	#
-	#  Allowed values are:
-	#	auth	  - Handles Access-Request packets
-	#	acct	  - Handles Accounting-Request packets
-	#	auth+acct - Handles Access-Request packets at "port",
-	#		    and Accounting-Request packets at "port + 1"
-	#	coa	  - Handles CoA-Request and Disconnect-Request packets.
-	#		    See also raddb/sites-available/originate-coa
-	type = auth
-
-	#
-	#  Configure ONE OF the following entries:
-	#
-	#	IPv4 address
-	#
-	ipaddr = 127.0.0.1
-
-	#	OR IPv6 address
-	# ipv6addr = ::1
-
-	#	OR virtual server
-	# virtual_server = foo
-
-	#	Note that while both ipaddr and ipv6addr will accept
-	#	both addresses and host names, we do NOT recommend
-	#	using host names.  When you specify a host name, the
-	#	server has to do a DNS lookup to find the IP address
-	#	of the home server.  If the DNS server is slow or
-	#	unresponsive, it means that FreeRADIUS will NOT be
-	#	able to determine the address, and will therefore NOT
-	#	start.
-	#
-	#	Also, the mapping of host name to address is done ONCE
-	#	when the server starts.  If DNS is later updated to
-	#	change the address, FreeRADIUS will NOT discover that
-	#	until after a re-start, or a HUP.
-	#
-	#	If you specify a virtual_server here, then requests
-	#	will be proxied internally to that virtual server.
-	#	These requests CANNOT be proxied again, however.  The
-	#	intent is to have the local server handle packets
-	#	when all home servers are dead.
-	#
-	#	Requests proxied to a virtual server will be passed
-	#	through the pre-proxy and post-proxy sections, just
-	#	like any other request.  See also the sample "realm"
-	#	configuration, below.
-	#
-	#	None of the rest of the home_server configuration is used
-	#	for the "virtual_server" configuration.
-
-	#
-	#  The port to which packets are sent.
-	#
-	#  Usually 1812 for type "auth", and  1813 for type "acct".
-	#  Older servers may use 1645 and 1646.
-	#  Use 3799 for type "coa"
-	#
-	port = 1812
-
-	#
-	#  The transport protocol.
-	#
-	#  If unspecified, defaults to "udp", which is the traditional
-	#  RADIUS transport.  It may also be "tcp", in which case TCP
-	#  will be used to talk to this home server.
-	#
-	#  When home servers are put into pools, the pool can contain
-	#  home servers with both UDP and TCP transports.
-	#
-	#proto = udp
-
-	#
-	#  The shared secret use to "encrypt" and "sign" packets between
-	#  FreeRADIUS and the home server.
-	#
-	#  The secret can be any string, up to 8k characters in length.
-	#
-	#  Control codes can be entered vi octal encoding,
-	#	e.g. "\101\102" == "AB"
-	#  Quotation marks can be entered by escaping them,
-	#	e.g. "foo\"bar"
-	#  Spaces or other "special" characters can be entered
-	#  by putting quotes around the string.
-	#	e.g. "foo bar"
-	#	     "foo;bar"
-	#
-	secret = testing123
-
-	############################################################
-	#
-	#  The rest of the configuration items listed here are optional,
-	#  and do not have to appear in every home server definition.
-	#
-	############################################################
-
-	#
-	#  You can optionally specify the source IP address used when
-	#  proxying requests to this home server.  When the src_ipaddr
-	#  it set, the server will automatically create a proxy
-	#  listener for that IP address.
-	#
-	#  If you specify this field for one home server, you will
-	#  likely need to specify it for ALL home servers.
-	#
-	#  If you don't care about the source IP address, leave this
-	#  entry commented.
-	#
-#	src_ipaddr = 127.0.0.1
-
-	#
-	#  If the home server does not respond to a request within
-	#  this time, this server will initiate "zombie_period".
-	#
-	#  The response window is large because responses MAY be slow,
-	#  especially when proxying across the Internet.
-	#
-	#  Useful range of values: 5 to 60
-	response_window = 20
-
-	#
-	#  If you want the old behaviour of the server rejecting
-	#  proxied requests after "response_window" timeout, set
-	#  the following configuration item to "yes".
-	#
-	#  This configuration WILL be removed in a future release
-	#  If you believe you need it, email the freeradius-users
-	#  list, and explain why it should stay in the server.
-	#
-#	no_response_fail = no
-
-	#
-	#  If the home server does not respond to ANY packets during
-	#  the "zombie period", it will be considered to be dead.
-	#
-	#  A home server that is marked "zombie" will be used for
-	#  proxying as a low priority.  If there are live servers,
-	#  they will always be preferred to a zombie.  Requests will
-	#  be proxied to a zombie server ONLY when there are no
-	#  live servers.
-	#
-	#  Any request that is proxied to a home server will continue
-	#  to be sent to that home server until the home server is
-	#  marked dead.  At that point, it will fail over to another
-	#  server, if a live server is available.  If none is available,
-	#  then the "post-proxy-type fail" handler will be called.
-	#
-	#  If "status_check" below is something other than "none", then
-	#  the server will start sending status checks at the start of
-	#  the zombie period.  It will continue sending status checks
-	#  until the home server is marked "alive".
-	#
-	#  Useful range of values: 20 to 120
-	zombie_period = 40
-
-	############################################################
-	#
-	#  As of 2.0, FreeRADIUS supports RADIUS layer "status
-	#  checks".  These are used by a proxy server to see if a home
-	#  server is alive.
-	#
-	#  These status packets are sent ONLY if the proxying server
-	#  believes that the home server is dead.  They are NOT sent
-	#  if the proxying server believes that the home server is
-	#  alive.  They are NOT sent if the proxying server is not
-	#  proxying packets.
-	#
-	#  If the home server responds to the status check packet,
-	#  then it is marked alive again, and is returned to use.
-	#
-	############################################################
-
-	#
-	#  Some home servers do not support status checks via the
-	#  Status-Server packet.  Others may not have a "test" user
-	#  configured that can be used to query the server, to see if
-	#  it is alive.  For those servers, we have NO WAY of knowing
-	#  when it becomes alive again.  Therefore, after the server
-	#  has been marked dead, we wait a period of time, and mark
-	#  it alive again, in the hope that it has come back to
-	#  life.
-	#
-	#  If it has NOT come back to life, then FreeRADIUS will wait
-	#  for "zombie_period" before marking it dead again.  During
-	#  the "zombie_period", ALL AUTHENTICATIONS WILL FAIL, because
-	#  the home server is still dead.  There is NOTHING that can
-	#  be done about this, other than to enable the status checks,
-	#  as documented below.
-	#
-	#  e.g. if "zombie_period" is 40 seconds, and "revive_interval"
-	#  is 300 seconds, the for 40 seconds out of every 340, or about
-	#  10% of the time, all authentications will fail.
-	#
-	#  If the "zombie_period" and "revive_interval" configurations
-	#  are set smaller, than it is possible for up to 50% of
-	#  authentications to fail.
-	#
-	#  As a result, we recommend enabling status checks, and
-	#  we do NOT recommend using "revive_interval".
-	#
-	#  The "revive_interval" is used ONLY if the "status_check"
-	#  entry below is "none".  Otherwise, it will not be used,
-	#  and should be deleted.
-	#
-	#  Useful range of values: 60 to 3600
-	revive_interval = 120
-
-	#
-	#  The proxying server (i.e. this one) can do periodic status
-	#  checks to see if a dead home server has come back alive.
-	#
-	#  If set to "none", then the other configuration items listed
-	#  below are not used, and the "revive_interval" time is used
-	#  instead.
-	#
-	#  If set to "status-server", the Status-Server packets are
-	#  sent.  Many RADIUS servers support Status-Server.  If a
-	#  server does not support it, please contact the server
-	#  vendor and request that they add it.
-	#
-	#  If set to "request", then Access-Request, or Accounting-Request
-	#  packets are sent, depending on the "type" entry above (auth/acct).
-	#
-	#  Allowed values: none, status-server, request
-	status_check = status-server
-
-	#
-	#  If the home server does not support Status-Server packets,
-	#  then the server can still send Access-Request or
-	#  Accounting-Request packets, with a pre-defined user name.
-	#
-	#  This practice is NOT recommended, as it may potentially let
-	#  users gain network access by using these "test" accounts!
-	#
-	#  If it is used, we recommend that the home server ALWAYS
-	#  respond to these Access-Request status checks with
-	#  Access-Reject.  The status check just needs an answer, it
-	#  does not need an Access-Accept.
-	#
-	#  For Accounting-Request status checks, only the username
-	#  needs to be set.  The rest of the accounting attribute are
-	#  set to default values.  The home server that receives these
-	#  accounting packets SHOULD NOT treat them like normal user
-	#  accounting packets.  i.e It should probably NOT log them to
-	#  a database.
-	#
-	# username = "test_user_please_reject_me"
-	# password = "this is really secret"
-
-	#
-	#  Configure the interval between sending status check packets.
-	#
-	#  Setting it too low increases the probability of spurious
-	#  fail-over and fallback attempts.
-	#
-	#  Useful range of values: 6 to 120
-	check_interval = 30
-
-	#
-	#  Configure the number of status checks in a row that the
-	#  home server needs to respond to before it is marked alive.
-	#
-	#  If you want to mark a home server as alive after a short
-	#  time period of being responsive, it is best to use a small
-	#  "check_interval", and a large value for
-	#  "num_answers_to_alive".  Using a long "check_interval" and
-	#  a small number for "num_answers_to_alive" increases the
-	#  probability of spurious fail-over and fallback attempts.
-	#
-	#  Useful range of values: 3 to 10
-	num_answers_to_alive = 3
-
-	#
-	#  Limit the total number of outstanding packets to the home
-	#  server.
-	#
-	#  if ((#request sent) - (#requests received)) > max_outstanding
-	#	then stop sending more packets to the home server
-	#
-	#  This lets us gracefully fall over when the home server
-	#  is overloaded.
-	max_outstanding = 65536
-
-	#
-	#  The configuration items in the next sub-section are used ONLY
-	#  when "type = coa".  It is ignored for all other type of home
-	#  servers.
-	#
-	#  See RFC 5080 for the definitions of the following terms.
-	#  RAND is a function (internal to FreeRADIUS) returning
-	#  random numbers between -0.1 and +0.1
-	#
-	#  First Re-transmit occurs after:
-	#
-	#	 RT = IRT + RAND*IRT
-	#
-	#  Subsequent Re-transmits occur after:
-	#
-	#	RT = 2 * RTprev + RAND * RTprev
-	#
-	#  Re-transmits are capped at:
-	#
-	#	if (MRT && (RT > MRT)) RT = MRT + RAND * MRT
-	#
-	#  For a maximum number of attempts: MRC
-	#
-	#  For a maximum (total) period of time: MRD.
-	#
-	coa {
-		# Initial retransmit interval: 1..5
-		irt = 2
-
-		# Maximum Retransmit Timeout: 1..30 (0 == no maximum)
-		mrt = 16
-
-		# Maximum Retransmit Count: 1..20 (0 == retransmit forever)
-		mrc = 5
-
-		# Maximum Retransmit Duration: 5..60
-		mrd = 30
-	}
-
-	#
-	#  Connection limiting for home servers with "proto = tcp".
-	#
-	#  This section is ignored for other home servers.
-	#
-	limit {
-	      #
-	      #  Limit the number of TCP connections to the home server.
-	      #
-	      #  The default is 16.
-	      #  Setting this to 0 means "no limit"
-	      max_connections = 16
-
-	      #
-	      #  Limit the total number of requests sent over one
-	      #  TCP connection.  After this number of requests, the
-	      #  connection will be closed.  Any new packets that are
-	      #  proxied to the home server will result in a new TCP
-	      #  connection being made.
-	      #
-	      #  Setting this to 0 means "no limit"
-	      max_requests = 0
-
-	      #
-	      #  The lifetime, in seconds, of a TCP connection.  After
-	      #  this lifetime, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "forever".
-	      lifetime = 0
-
-	      #
-	      #  The idle timeout, in seconds, of a TCP connection.
-	      #  If no packets have been sent over the connection for
-	      #  this time, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "no timeout".
-	      idle_timeout = 0
-	}
-
-}
-
-# Sample virtual home server.
-#
-#
-#home_server virtual.example.com {
-#	    virtual_server = virtual.example.com
-#}
-
-######################################################################
-#
-#  This section defines a pool of home servers that is used
-#  for fail-over and load-balancing.  In earlier versions of
-#  FreeRADIUS, fail-over and load-balancing were defined per-realm.
-#  As a result, if a server had 5 home servers, each of which served
-#  the same 10 realms, you would need 50 "realm" entries.
-#
-#  In version 2.0, you would need 5 "home_server" sections,
-#  10 'realm" sections, and one "home_server_pool" section to tie the
-#  two together.
-#
-home_server_pool my_auth_failover {
-	#
-	#  The type of this pool controls how home servers are chosen.
-	#
-	#  fail-over - the request is sent to the first live
-	#  	home server in the list.  i.e. If the first home server
-	#	is marked "dead", the second one is chosen, etc.
-	#
-	#  load-balance - the least busy home server is chosen,
-	#	where "least busy" is counted by taking the number of
-	#	requests sent to that home server, and subtracting the
-	#	number of responses received from that home server.
-	#
-	#	If there are two or more servers with the same low
-	#	load, then one of those servers is chosen at random.
-	#	This configuration is most similar to the old
-	#	"round-robin" method, though it is not exactly the same.
-	#
-	#	Note that load balancing does not work well with EAP,
-	#	as EAP requires packets for an EAP conversation to be
-	#	sent to the same home server.  The load balancing method
-	#	does not keep state in between packets, meaning that
-	#	EAP packets for the same conversation may be sent to
-	#	different home servers.  This will prevent EAP from
-	#	working.
-	#
-	#	For non-EAP authentication methods, and for accounting
-	#	packets, we recommend using "load-balance".  It will
-	#	ensure the highest availability for your network.
-	#
-	#  client-balance - the home server is chosen by hashing the
-	#	source IP address of the packet.  If that home server
-	#	is down, the next one in the list is used, just as
-	#	with "fail-over".
-	#
-	#	There is no way of predicting which source IP will map
-	#	to which home server.
-	#
-	#	This configuration is most useful to do simple load
-	#	balancing for EAP sessions, as the EAP session will
-	#	always be sent to the same home server.
-	#
-	#  client-port-balance - the home server is chosen by hashing
-	#	the source IP address and source port of the packet.
-	#	If that home server is down, the next one in the list
-	#	is used, just as with "fail-over".
-	#
-	#	This method provides slightly better load balancing
-	#	for EAP sessions than "client-balance".  However, it
-	#	also means that authentication and accounting packets
-	#	for the same session MAY go to different home servers.
-	#
-	#  keyed-balance - the home server is chosen by hashing (FNV)
-	#	the contents of the Load-Balance-Key attribute from the
-	#	control items.  The  request is then sent to home server
-	#	chosen by taking:
-	#
-	#		server = (hash % num_servers_in_pool).
-	#
-	#	If there is no Load-Balance-Key in the control items,
-	#	the load balancing method is identical to "load-balance".
-	#
-	#	For most non-EAP authentication methods, The User-Name
-	#	attribute provides a good key.  An "unlang" policy can
-	#	be used to copy the User-Name to the Load-Balance-Key
-	#	attribute.  This method may not work for EAP sessions,
-	#	as the User-Name outside of the TLS tunnel is often
-	#	static, e.g. "anonymous@realm".
-	#
-	#
-	#  The default type is fail-over.
-	type = fail-over
-
-	#
-	#  A virtual_server may be specified here.  If so, the
-	#  "pre-proxy" and "post-proxy" sections are called when
-	#  the request is proxied, and when a response is received.
-	#
-	#  This lets you have one policy for all requests that are proxied
-	#  to a home server.  This policy is completely independent of
-	#  any policies used to receive, or process the request.
-	#
-	#virtual_server = pre_post_proxy_for_pool
-
-	#
-	#  Next, a list of one or more home servers.  The names
-	#  of the home servers are NOT the hostnames, but the names
-	#  of the sections.  (e.g. home_server foo {...} has name "foo".
-	#
-	#  Note that ALL home servers listed here have to be of the same
-	#  type.  i.e. they all have to be "auth", or they all have to
-	#  be "acct", or the all have to be "auth+acct".
-	#
-	home_server = localhost
-
-	#  Additional home servers can be listed.
-	#  There is NO LIMIT to the number of home servers that can
-	#  be listed, though using more than 10 or so will become
-	#  difficult to manage.
-	#
-	# home_server = foo.example.com
-	# home_server = bar.example.com
-	# home_server = baz.example.com
-	# home_server = ...
-
-
-	#
-	#  If ALL home servers are dead, then this "fallback" home server
-	#  is used.  If set, it takes precedence over any realm-based
-	#  fallback, such as the DEFAULT realm.
-	#
-	#  For reasons of stability, this home server SHOULD be a virtual
-	#  server.  Otherwise, the fallback may itself be dead!
-	#
-	#fallback = virtual.example.com
-}
-
-######################################################################
-#
-#
-#  This section defines a new-style "realm".  Note the in version 2.0,
-#  there are many fewer configuration items than in 1.x for a realm.
-#
-#  Automatic proxying is done via the "realms" module (see "man
-#  rlm_realm").  To manually proxy the request put this entry in the
-#  "users" file:
-
-#
-#
-#DEFAULT	Proxy-To-Realm := "realm_name"
-#
-#
-realm example.com {
-	#
-	#  Realms point to pools of home servers.
-#
-	#  For authentication, the "auth_pool" configuration item
-	#  should point to a "home_server_pool" that was previously
-	#  defined.  All of the home servers in the "auth_pool" must
-	#  be of type "auth".
-	#
-	#  For accounting, the "acct_pool" configuration item
-	#  should point to a "home_server_pool" that was previously
-	#  defined.  All of the home servers in the "acct_pool" must
-	#  be of type "acct".
-	#
-	#  If you have a "home_server_pool" where all of the home servers
-	#  are of type "auth+acct", you can just use the "pool"
-	#  configuration item, instead of specifying both "auth_pool"
-	#  and "acct_pool".
-
-	auth_pool = my_auth_failover
-#	acct_pool = acct
-
-	#  As of Version 3.0, the server can proxy CoA packets
-	#  based on the Operator-Name attribute.  This requires
-	#  that the "suffix" module be listed in the "recv-coa"
-	#  section.
-	#
-	#  See raddb/sites-available/coa
-	#
-#	coa_pool = name_of_coa_pool
-
-	#
-	#  Normally, when an incoming User-Name is matched against the
-	#  realm, the realm name is "stripped" off, and the "stripped"
-	#  user name is used to perform matches.
-	#
-	#  e.g. User-Name = "bob@example.com" will result in two new
-	#  attributes being created by the "realms" module:
-	#
-	#	Stripped-User-Name = "bob"
-	#	Realm = "example.com"
-	#
-	#  The Stripped-User-Name is then used as a key in the "users"
-	#  file, for example.
-	#
-	#  If you do not want this to happen, uncomment "nostrip" below.
-	#
-	# nostrip
-
-	#  There are no more configuration entries for a realm.
-}
-
-
-#
-#  This is a sample entry for iPass.
-#  Note that you have to define "ipass_auth_pool" and
-#  "ipass_acct_pool", along with home_servers for them, too.
-#
-#realm IPASS {
-#	nostrip
-#
-#	auth_pool = ipass_auth_pool
-#	acct_pool = ipass_acct_pool
-#}
-
-#
-#  This realm is used mainly to cancel proxying.  You can have
-#  the "realm suffix" module configured to proxy all requests for
-#  a realm, and then later cancel the proxying, based on other
-#  configuration.
-#
-#  For example, you want to terminate PEAP or EAP-TTLS locally,
-#  you can add the following to the "users" file:
-#
-#  DEFAULT EAP-Type == PEAP, Proxy-To-Realm := LOCAL
-#
-realm LOCAL {
-	#  If we do not specify a server pool, the realm is LOCAL, and
-	#  requests are not proxied to it.
-}
-
-#
-#  This realm is for requests which don't have an explicit realm
-#  prefix or suffix.  User names like "bob" will match this one.
-#
-#realm NULL {
-#	authhost	= radius.company.com:1600
-#	accthost	= radius.company.com:1601
-#	secret		= testing123
-#}
-
-#
-#  This realm is for ALL OTHER requests.
-#
-#realm DEFAULT {
-#	authhost	= radius.company.com:1600
-#	accthost	= radius.company.com:1601
-#	secret		= testing123
-#}
-
-
-#  This realm "proxies" requests internally to a virtual server.
-#  The pre-proxy and post-proxy sections are run just as with any
-#  other kind of home server.  The virtual server then receives
-#  the request, and replies, just as with any other packet.
-#
-#  Once proxied internally like this, the request CANNOT be proxied
-#  internally or externally.
-#
-#realm virtual.example.com {
-#	virtual_server = virtual.example.com
-#}
-#
-
-#
-#  Regular expressions may also be used as realm names.  If these are used,
-#  then the "find matching realm" process is as follows:
-#
-#    1) Look for a non-regex realm with an *exact* match for the name.
-#       If found, it is used in preference to any regex matching realm.
-#
-#    2) Look for a regex realm, in the order that they are listed
-#       in the configuration files.  Any regex match is performed in
-#	a case-insensitive fashion.
-#
-#    3) If no realm is found, return the DEFAULT realm, if any.
-#
-#  The order of the realms matters in step (2).  For example, defining
-#  two realms ".*\.example.net$" and ".*\.test\.example\.net$" will result in
-#  the second realm NEVER matching.  This is because all of the realms
-#  which match the second regex also match the first one.  Since the
-#  first regex matches, it is returned.
-#
-#  The solution is to list the realms in the opposite order,. e.g.
-#  ".*\.test\.example.net$", followed by ".*\.example\.net$".
-#
-#
-#  Some helpful rules:
-#
-#   - always place a '~' character at the start of the realm name.
-#     This signifies that it is a regex match, and not an exact match
-#     for the realm.
-#
-#   - place the regex in double quotes.  This helps the configuration
-#     file parser ignore any "special" characters in the regex.
-#     Yes, this rule is different than the normal "unlang" rules for
-#     regular expressions.  That may be fixed in a future release.
-#
-#   - use two back-slashes '\\' whenever you need one backslash in the
-#     regex.  e.g. "~.*\\.example\\.net$", and not "~\.example\.net$".
-#     This is because the regex is in a double-quoted string, and normal
-#     rules apply for double-quoted strings.
-#
-#   - If you are matching domain names, use two backslashes in front of
-#     every '.' (dot or period).  This is because '.' has special meaning
-#     in a regular expression: match any character.  If you do not do this,
-#     then "~.*.example.net$" will match "fooXexampleYnet", which is likely
-#     not what you want
-#
-#   - If you are matching domain names, put a '$' at the end of the regex
-#     that matches the domain name.  This tells the regex matching code
-#     that the realm ENDS with the domain name, so it does not match
-#     realms with the domain name in the middle.  e.g. "~.*\\.example\\.net"
-#     will match "test.example.netFOO", which is likely not what you want.
-#     Using "~(.*\\.)example\\.net$" is better.
-#
-#  The more regex realms that are defined, the more time it takes to
-#  process them.  You should define as few regex realms as possible
-#  in order to maximize server performance.
-#
-#realm "~(.*\\.)*example\\.net$" {
-#      auth_pool = my_auth_failover
-#}
diff --git a/src/test/setup/radius-config/freeradius/radiusd.conf b/src/test/setup/radius-config/freeradius/radiusd.conf
deleted file mode 100644
index 327b10b..0000000
--- a/src/test/setup/radius-config/freeradius/radiusd.conf
+++ /dev/null
@@ -1,772 +0,0 @@
-# -*- text -*-
-##
-## radiusd.conf	-- FreeRADIUS server configuration file - 3.0.3
-##
-##	http://www.freeradius.org/
-##	$Id: 307ae108f579b9c339e6ba819387ff7ad8baff87 $
-##
-
-######################################################################
-#
-#	Read "man radiusd" before editing this file.  See the section
-#	titled DEBUGGING.  It outlines a method where you can quickly
-#	obtain the configuration you want, without running into
-#	trouble.
-#
-#	Run the server in debugging mode, and READ the output.
-#
-#		$ radiusd -X
-#
-#	We cannot emphasize this point strongly enough.  The vast
-#	majority of problems can be solved by carefully reading the
-#	debugging output, which includes warnings about common issues,
-#	and suggestions for how they may be fixed.
-#
-#	There may be a lot of output, but look carefully for words like:
-#	"warning", "error", "reject", or "failure".  The messages there
-#	will usually be enough to guide you to a solution.
-#
-#	If you are going to ask a question on the mailing list, then
-#	explain what you are trying to do, and include the output from
-#	debugging mode (radiusd -X).  Failure to do so means that all
-#	of the responses to your question will be people telling you
-#	to "post the output of radiusd -X".
-
-######################################################################
-#
-#  	The location of other config files and logfiles are declared
-#  	in this file.
-#
-#  	Also general configuration for modules can be done in this
-#  	file, it is exported through the API to modules that ask for
-#  	it.
-#
-#	See "man radiusd.conf" for documentation on the format of this
-#	file.  Note that the individual configuration items are NOT
-#	documented in that "man" page.  They are only documented here,
-#	in the comments.
-#
-#	The "unlang" policy language can be used to create complex
-#	if / else policies.  See "man unlang" for details.
-#
-
-prefix = /usr
-exec_prefix = /usr
-sysconfdir = /etc
-localstatedir = /var
-sbindir = ${exec_prefix}/sbin
-logdir = /var/log/freeradius
-raddbdir = /etc/freeradius
-radacctdir = ${logdir}/radacct
-
-#
-#  name of the running server.  See also the "-n" command-line option.
-name = radiusd
-
-#  Location of config and logfiles.
-confdir = ${raddbdir}
-modconfdir = ${confdir}/mods-config
-certdir = ${confdir}/certs_2
-cadir   = ${confdir}/certs_2
-run_dir = ${localstatedir}/run/${name}
-
-# Should likely be ${localstatedir}/lib/radiusd
-db_dir = ${raddbdir}
-
-#
-# libdir: Where to find the rlm_* modules.
-#
-#   This should be automatically set at configuration time.
-#
-#   If the server builds and installs, but fails at execution time
-#   with an 'undefined symbol' error, then you can use the libdir
-#   directive to work around the problem.
-#
-#   The cause is usually that a library has been installed on your
-#   system in a place where the dynamic linker CANNOT find it.  When
-#   executing as root (or another user), your personal environment MAY
-#   be set up to allow the dynamic linker to find the library.  When
-#   executing as a daemon, FreeRADIUS MAY NOT have the same
-#   personalized configuration.
-#
-#   To work around the problem, find out which library contains that symbol,
-#   and add the directory containing that library to the end of 'libdir',
-#   with a colon separating the directory names.  NO spaces are allowed.
-#
-#   e.g. libdir = /usr/local/lib:/opt/package/lib
-#
-#   You can also try setting the LD_LIBRARY_PATH environment variable
-#   in a script which starts the server.
-#
-#   If that does not work, then you can re-configure and re-build the
-#   server to NOT use shared libraries, via:
-#
-#	./configure --disable-shared
-#	make
-#	make install
-#
-libdir = /usr/lib/freeradius
-
-#  pidfile: Where to place the PID of the RADIUS server.
-#
-#  The server may be signalled while it's running by using this
-#  file.
-#
-#  This file is written when ONLY running in daemon mode.
-#
-#  e.g.:  kill -HUP `cat /var/run/radiusd/radiusd.pid`
-#
-pidfile = ${run_dir}/${name}.pid
-
-#  panic_action: Command to execute if the server dies unexpectedly.
-#
-#  FOR PRODUCTION SYSTEMS, ACTIONS SHOULD ALWAYS EXIT.
-#  AN INTERACTIVE ACTION MEANS THE SERVER IS NOT RESPONDING TO REQUESTS.
-#  AN INTERACTICE ACTION MEANS THE SERVER WILL NOT RESTART.
-#
-#  THE SERVER MUST NOT BE ALLOWED EXECUTE UNTRUSTED PANIC ACTION CODE
-#  PATTACH CAN BE USED AS AN ATTACK VECTOR.
-#
-#  The panic action is a command which will be executed if the server
-#  receives a fatal, non user generated signal, i.e. SIGSEGV, SIGBUS,
-#  SIGABRT or SIGFPE.
-#
-#  This can be used to start an interactive debugging session so
-#  that information regarding the current state of the server can
-#  be acquired.
-#
-#  The following string substitutions are available:
-#  - %e   The currently executing program e.g. /sbin/radiusd
-#  - %p   The PID of the currently executing program e.g. 12345
-#
-#  Standard ${} substitutions are also allowed.
-#
-#  An example panic action for opening an interactive session in GDB would be:
-#
-#panic_action = "gdb %e %p"
-#
-#  Again, don't use that on a production system.
-#
-#  An example panic action for opening an automated session in GDB would be:
-#
-#panic_action = "gdb -silent -x ${raddbdir}/panic.gdb %e %p 2>&1 | tee ${logdir}/gdb-${name}-%p.log"
-#
-#  That command can be used on a production system.
-#
-
-#  max_request_time: The maximum time (in seconds) to handle a request.
-#
-#  Requests which take more time than this to process may be killed, and
-#  a REJECT message is returned.
-#
-#  WARNING: If you notice that requests take a long time to be handled,
-#  then this MAY INDICATE a bug in the server, in one of the modules
-#  used to handle a request, OR in your local configuration.
-#
-#  This problem is most often seen when using an SQL database.  If it takes
-#  more than a second or two to receive an answer from the SQL database,
-#  then it probably means that you haven't indexed the database.  See your
-#  SQL server documentation for more information.
-#
-#  Useful range of values: 5 to 120
-#
-max_request_time = 30
-
-#  cleanup_delay: The time to wait (in seconds) before cleaning up
-#  a reply which was sent to the NAS.
-#
-#  The RADIUS request is normally cached internally for a short period
-#  of time, after the reply is sent to the NAS.  The reply packet may be
-#  lost in the network, and the NAS will not see it.  The NAS will then
-#  re-send the request, and the server will respond quickly with the
-#  cached reply.
-#
-#  If this value is set too low, then duplicate requests from the NAS
-#  MAY NOT be detected, and will instead be handled as separate requests.
-#
-#  If this value is set too high, then the server will cache too many
-#  requests, and some new requests may get blocked.  (See 'max_requests'.)
-#
-#  Useful range of values: 2 to 10
-#
-cleanup_delay = 5
-
-#  max_requests: The maximum number of requests which the server keeps
-#  track of.  This should be 256 multiplied by the number of clients.
-#  e.g. With 4 clients, this number should be 1024.
-#
-#  If this number is too low, then when the server becomes busy,
-#  it will not respond to any new requests, until the 'cleanup_delay'
-#  time has passed, and it has removed the old requests.
-#
-#  If this number is set too high, then the server will use a bit more
-#  memory for no real benefit.
-#
-#  If you aren't sure what it should be set to, it's better to set it
-#  too high than too low.  Setting it to 1000 per client is probably
-#  the highest it should be.
-#
-#  Useful range of values: 256 to infinity
-#
-max_requests = 1024
-
-#  hostname_lookups: Log the names of clients or just their IP addresses
-#  e.g., www.freeradius.org (on) or 206.47.27.232 (off).
-#
-#  The default is 'off' because it would be overall better for the net
-#  if people had to knowingly turn this feature on, since enabling it
-#  means that each client request will result in AT LEAST one lookup
-#  request to the nameserver.   Enabling hostname_lookups will also
-#  mean that your server may stop randomly for 30 seconds from time
-#  to time, if the DNS requests take too long.
-#
-#  Turning hostname lookups off also means that the server won't block
-#  for 30 seconds, if it sees an IP address which has no name associated
-#  with it.
-#
-#  allowed values: {no, yes}
-#
-hostname_lookups = no
-
-#
-#  Logging section.  The various "log_*" configuration items
-#  will eventually be moved here.
-#
-log {
-	#
-	#  Destination for log messages.  This can be one of:
-	#
-	#	files - log to "file", as defined below.
-	#	syslog - to syslog (see also the "syslog_facility", below.
-	#	stdout - standard output
-	#	stderr - standard error.
-	#
-	#  The command-line option "-X" over-rides this option, and forces
-	#  logging to go to stdout.
-	#
-	destination = files
-
-	#
-	#  Highlight important messages sent to stderr and stdout.
-	#
-	#  Option will be ignored (disabled) if output if TERM is not
-	#  an xterm or output is not to a TTY.
-	#
-	colourise = yes
-
-	#
-	#  The logging messages for the server are appended to the
-	#  tail of this file if destination == "files"
-	#
-	#  If the server is running in debugging mode, this file is
-	#  NOT used.
-	#
-	file = ${logdir}/radius.log
-
-	#
-	#  If this configuration parameter is set, then log messages for
-	#  a *request* go to this file, rather than to radius.log.
-	#
-	#  i.e. This is a log file per request, once the server has accepted
-	#  the request as being from a valid client.  Messages that are
-	#  not associated with a request still go to radius.log.
-	#
-	#  Not all log messages in the server core have been updated to use
-	#  this new internal API.  As a result, some messages will still
-	#  go to radius.log.  Please submit patches to fix this behavior.
-	#
-	#  The file name is expanded dynamically.  You should ONLY user
-	#  server-side attributes for the filename (e.g. things you control).
-	#  Using this feature MAY also slow down the server substantially,
-	#  especially if you do thinks like SQL calls as part of the
-	#  expansion of the filename.
-	#
-	#  The name of the log file should use attributes that don't change
-	#  over the lifetime of a request, such as User-Name,
-	#  Virtual-Server or Packet-Src-IP-Address.  Otherwise, the log
-	#  messages will be distributed over multiple files.
-	#
-	#  Logging can be enabled for an individual request by a special
-	#  dynamic expansion macro:  %{debug: 1}, where the debug level
-	#  for this request is set to '1' (or 2, 3, etc.).  e.g.
-	#
-	#	...
-	#	update control {
-	#	       Tmp-String-0 = "%{debug:1}"
-	#	}
-	#	...
-	#
-	#  The attribute that the value is assigned to is unimportant,
-	#  and should be a "throw-away" attribute with no side effects.
-	#
-	#requests = ${logdir}/radiusd-%{%{Virtual-Server}:-DEFAULT}-%Y%m%d.log
-
-	#
-	#  Which syslog facility to use, if ${destination} == "syslog"
-	#
-	#  The exact values permitted here are OS-dependent.  You probably
-	#  don't want to change this.
-	#
-	syslog_facility = daemon
-
-	#  Log the full User-Name attribute, as it was found in the request.
-	#
-	# allowed values: {no, yes}
-	#
-	stripped_names = no
-
-	#  Log authentication requests to the log file.
-	#
-	#  allowed values: {no, yes}
-	#
-	auth = no
-
-	#  Log passwords with the authentication requests.
-	#  auth_badpass  - logs password if it's rejected
-	#  auth_goodpass - logs password if it's correct
-	#
-	#  allowed values: {no, yes}
-	#
-	auth_badpass = no
-	auth_goodpass = no
-
-	#  Log additional text at the end of the "Login OK" messages.
-	#  for these to work, the "auth" and "auth_goodpass" or "auth_badpass"
-	#  configurations above have to be set to "yes".
-	#
-	#  The strings below are dynamically expanded, which means that
-	#  you can put anything you want in them.  However, note that
-	#  this expansion can be slow, and can negatively impact server
-	#  performance.
-	#
-#	msg_goodpass = ""
-#	msg_badpass = ""
-
-	#  The message when the user exceeds the Simultaneous-Use limit.
-	#
-	msg_denied = "You are already logged in - access denied"
-}
-
-#  The program to execute to do concurrency checks.
-checkrad = ${sbindir}/checkrad
-
-# SECURITY CONFIGURATION
-#
-#  There may be multiple methods of attacking on the server.  This
-#  section holds the configuration items which minimize the impact
-#  of those attacks
-#
-security {
-	#  chroot: directory where the server does "chroot".
-	#
-	#  The chroot is done very early in the process of starting
-	#  the server.  After the chroot has been performed it
-	#  switches to the "user" listed below (which MUST be
-	#  specified).  If "group" is specified, it switches to that
-	#  group, too.  Any other groups listed for the specified
-	#  "user" in "/etc/group" are also added as part of this
-	#  process.
-	#
-	#  The current working directory (chdir / cd) is left
-	#  *outside* of the chroot until all of the modules have been
-	#  initialized.  This allows the "raddb" directory to be left
-	#  outside of the chroot.  Once the modules have been
-	#  initialized, it does a "chdir" to ${logdir}.  This means
-	#  that it should be impossible to break out of the chroot.
-	#
-	#  If you are worried about security issues related to this
-	#  use of chdir, then simply ensure that the "raddb" directory
-	#  is inside of the chroot, end be sure to do "cd raddb"
-	#  BEFORE starting the server.
-	#
-	#  If the server is statically linked, then the only files
-	#  that have to exist in the chroot are ${run_dir} and
-	#  ${logdir}.  If you do the "cd raddb" as discussed above,
-	#  then the "raddb" directory has to be inside of the chroot
-	#  directory, too.
-	#
-#	chroot = /path/to/chroot/directory
-
-	# user/group: The name (or #number) of the user/group to run radiusd as.
-	#
-	#   If these are commented out, the server will run as the
-	#   user/group that started it.  In order to change to a
-	#   different user/group, you MUST be root ( or have root
-	#   privileges ) to start the server.
-	#
-	#   We STRONGLY recommend that you run the server with as few
-	#   permissions as possible.  That is, if you're not using
-	#   shadow passwords, the user and group items below should be
-	#   set to radius'.
-	#
-	#  NOTE that some kernels refuse to setgid(group) when the
-	#  value of (unsigned)group is above 60000; don't use group
-	#  "nobody" on these systems!
-	#
-	#  On systems with shadow passwords, you might have to set
-	#  'group = shadow' for the server to be able to read the
-	#  shadow password file.  If you can authenticate users while
-	#  in debug mode, but not in daemon mode, it may be that the
-	#  debugging mode server is running as a user that can read
-	#  the shadow info, and the user listed below can not.
-	#
-	#  The server will also try to use "initgroups" to read
-	#  /etc/groups.  It will join all groups where "user" is a
-	#  member.  This can allow for some finer-grained access
-	#  controls.
-	#
-#	user = radius
-#	group = radius
-
-	#  Core dumps are a bad thing.  This should only be set to
-	#  'yes' if you're debugging a problem with the server.
-	#
-	#  allowed values: {no, yes}
-	#
-	allow_core_dumps = no
-
-	#
-	#  max_attributes: The maximum number of attributes
-	#  permitted in a RADIUS packet.  Packets which have MORE
-	#  than this number of attributes in them will be dropped.
-	#
-	#  If this number is set too low, then no RADIUS packets
-	#  will be accepted.
-	#
-	#  If this number is set too high, then an attacker may be
-	#  able to send a small number of packets which will cause
-	#  the server to use all available memory on the machine.
-	#
-	#  Setting this number to 0 means "allow any number of attributes"
-	max_attributes = 200
-
-	#
-	#  reject_delay: When sending an Access-Reject, it can be
-	#  delayed for a few seconds.  This may help slow down a DoS
-	#  attack.  It also helps to slow down people trying to brute-force
-	#  crack a users password.
-	#
-	#  Setting this number to 0 means "send rejects immediately"
-	#
-	#  If this number is set higher than 'cleanup_delay', then the
-	#  rejects will be sent at 'cleanup_delay' time, when the request
-	#  is deleted from the internal cache of requests.
-	#
-	#  Useful ranges: 1 to 5
-	reject_delay = 1
-
-	#
-	#  status_server: Whether or not the server will respond
-	#  to Status-Server requests.
-	#
-	#  When sent a Status-Server message, the server responds with
-	#  an Access-Accept or Accounting-Response packet.
-	#
-	#  This is mainly useful for administrators who want to "ping"
-	#  the server, without adding test users, or creating fake
-	#  accounting packets.
-	#
-	#  It's also useful when a NAS marks a RADIUS server "dead".
-	#  The NAS can periodically "ping" the server with a Status-Server
-	#  packet.  If the server responds, it must be alive, and the
-	#  NAS can start using it for real requests.
-	#
-	#  See also raddb/sites-available/status
-	#
-	status_server = yes
-
-	#
-	#  allow_vulnerable_openssl = yes
-	#  versions of OpenSSL known to have critical vulnerabilities.
-	#
-	#  This check is based on the version number reported by libssl
-	#  and may not reflect patches applied to libssl by
-	#  distribution maintainers.
-	#
-	allow_vulnerable_openssl = yes
-}
-
-# PROXY CONFIGURATION
-#
-#  proxy_requests: Turns proxying of RADIUS requests on or off.
-#
-#  The server has proxying turned on by default.  If your system is NOT
-#  set up to proxy requests to another server, then you can turn proxying
-#  off here.  This will save a small amount of resources on the server.
-#
-#  If you have proxying turned off, and your configuration files say
-#  to proxy a request, then an error message will be logged.
-#
-#  To disable proxying, change the "yes" to "no", and comment the
-#  $INCLUDE line.
-#
-#  allowed values: {no, yes}
-#
-proxy_requests  = yes
-$INCLUDE proxy.conf
-
-
-# CLIENTS CONFIGURATION
-#
-#  Client configuration is defined in "clients.conf".
-#
-
-#  The 'clients.conf' file contains all of the information from the old
-#  'clients' and 'naslist' configuration files.  We recommend that you
-#  do NOT use 'client's or 'naslist', although they are still
-#  supported.
-#
-#  Anything listed in 'clients.conf' will take precedence over the
-#  information from the old-style configuration files.
-#
-$INCLUDE clients.conf
-
-
-# THREAD POOL CONFIGURATION
-#
-#  The thread pool is a long-lived group of threads which
-#  take turns (round-robin) handling any incoming requests.
-#
-#  You probably want to have a few spare threads around,
-#  so that high-load situations can be handled immediately.  If you
-#  don't have any spare threads, then the request handling will
-#  be delayed while a new thread is created, and added to the pool.
-#
-#  You probably don't want too many spare threads around,
-#  otherwise they'll be sitting there taking up resources, and
-#  not doing anything productive.
-#
-#  The numbers given below should be adequate for most situations.
-#
-thread pool {
-	#  Number of servers to start initially --- should be a reasonable
-	#  ballpark figure.
-	start_servers = 5
-
-	#  Limit on the total number of servers running.
-	#
-	#  If this limit is ever reached, clients will be LOCKED OUT, so it
-	#  should NOT BE SET TOO LOW.  It is intended mainly as a brake to
-	#  keep a runaway server from taking the system with it as it spirals
-	#  down...
-	#
-	#  You may find that the server is regularly reaching the
-	#  'max_servers' number of threads, and that increasing
-	#  'max_servers' doesn't seem to make much difference.
-	#
-	#  If this is the case, then the problem is MOST LIKELY that
-	#  your back-end databases are taking too long to respond, and
-	#  are preventing the server from responding in a timely manner.
-	#
-	#  The solution is NOT do keep increasing the 'max_servers'
-	#  value, but instead to fix the underlying cause of the
-	#  problem: slow database, or 'hostname_lookups=yes'.
-	#
-	#  For more information, see 'max_request_time', above.
-	#
-	max_servers = 32
-
-	#  Server-pool size regulation.  Rather than making you guess
-	#  how many servers you need, FreeRADIUS dynamically adapts to
-	#  the load it sees, that is, it tries to maintain enough
-	#  servers to handle the current load, plus a few spare
-	#  servers to handle transient load spikes.
-	#
-	#  It does this by periodically checking how many servers are
-	#  waiting for a request.  If there are fewer than
-	#  min_spare_servers, it creates a new spare.  If there are
-	#  more than max_spare_servers, some of the spares die off.
-	#  The default values are probably OK for most sites.
-	#
-	min_spare_servers = 3
-	max_spare_servers = 10
-
-	#  When the server receives a packet, it places it onto an
-	#  internal queue, where the worker threads (configured above)
-	#  pick it up for processing.  The maximum size of that queue
-	#  is given here.
-	#
-	#  When the queue is full, any new packets will be silently
-	#  discarded.
-	#
-	#  The most common cause of the queue being full is that the
-	#  server is dependent on a slow database, and it has received
-	#  a large "spike" of traffic.  When that happens, there is
-	#  very little you can do other than make sure the server
-	#  receives less traffic, or make sure that the database can
-	#  handle the load.
-	#
-#	max_queue_size = 65536
-
-	#  There may be memory leaks or resource allocation problems with
-	#  the server.  If so, set this value to 300 or so, so that the
-	#  resources will be cleaned up periodically.
-	#
-	#  This should only be necessary if there are serious bugs in the
-	#  server which have not yet been fixed.
-	#
-	#  '0' is a special value meaning 'infinity', or 'the servers never
-	#  exit'
-	max_requests_per_server = 0
-
-	#  Automatically limit the number of accounting requests.
-	#  This configuration item tracks how many requests per second
-	#  the server can handle.  It does this by tracking the
-	#  packets/s received by the server for processing, and
-	#  comparing that to the packets/s handled by the child
-	#  threads.
-	#
-
-	#  If the received PPS is larger than the processed PPS, *and*
-	#  the queue is more than half full, then new accounting
-	#  requests are probabilistically discarded.  This lowers the
-	#  number of packets that the server needs to process.  Over
-	#  time, the server will "catch up" with the traffic.
-	#
-	#  Throwing away accounting packets is usually safe and low
-	#  impact.  The NAS will retransmit them in a few seconds, or
-	#  even a few minutes.  Vendors should read RFC 5080 Section 2.2.1
-	#  to see how accounting packets should be retransmitted.  Using
-	#  any other method is likely to cause network meltdowns.
-	#
-	auto_limit_acct = no
-}
-
-# MODULE CONFIGURATION
-#
-#  The names and configuration of each module is located in this section.
-#
-#  After the modules are defined here, they may be referred to by name,
-#  in other sections of this configuration file.
-#
-modules {
-	#
-	#  Each module has a configuration as follows:
-	#
-	#	name [ instance ] {
-	#		config_item = value
-	#		...
-	#	}
-	#
-	#  The 'name' is used to load the 'rlm_name' library
-	#  which implements the functionality of the module.
-	#
-	#  The 'instance' is optional.  To have two different instances
-	#  of a module, it first must be referred to by 'name'.
-	#  The different copies of the module are then created by
-	#  inventing two 'instance' names, e.g. 'instance1' and 'instance2'
-	#
-	#  The instance names can then be used in later configuration
-	#  INSTEAD of the original 'name'.  See the 'radutmp' configuration
-	#  for an example.
-	#
-
-	#
-	#  As of 3.0, modules are in mods-enabled/.  Files matching
-	#  the regex /[a-zA-Z0-9_.]+/ are loaded.  The modules are
-	#  initialized ONLY if they are referenced in a processing
-	#  section, such as authorize, authenticate, accounting,
-	#  pre/post-proxy, etc.
-	#
-	$INCLUDE mods-enabled/
-}
-
-# Instantiation
-#
-#  This section orders the loading of the modules.  Modules
-#  listed here will get loaded BEFORE the later sections like
-#  authorize, authenticate, etc. get examined.
-#
-#  This section is not strictly needed.  When a section like
-#  authorize refers to a module, it's automatically loaded and
-#  initialized.  However, some modules may not be listed in any
-#  of the following sections, so they can be listed here.
-#
-#  Also, listing modules here ensures that you have control over
-#  the order in which they are initialized.  If one module needs
-#  something defined by another module, you can list them in order
-#  here, and ensure that the configuration will be OK.
-#
-#  After the modules listed here have been loaded, all of the modules
-#  in the "mods-enabled" directory will be loaded.  Loading the
-#  "mods-enabled" directory means that unlike Version 2, you usually
-#  don't need to list modules here.
-#
-instantiate {
-	#
-	# We list the counter module here so that it registers
-	# the check_name attribute before any module which sets
-	# it
-#	daily
-
-	# subsections here can be thought of as "virtual" modules.
-	#
-	# e.g. If you have two redundant SQL servers, and you want to
-	# use them in the authorize and accounting sections, you could
-	# place a "redundant" block in each section, containing the
-	# exact same text.  Or, you could uncomment the following
-	# lines, and list "redundant_sql" in the authorize and
-	# accounting sections.
-	#
-	#redundant redundant_sql {
-	#	sql1
-	#	sql2
-	#}
-}
-
-######################################################################
-#
-#  Policies are virtual modules, similar to those defined in the
-#  "instantiate" section above.
-#
-#  Defining a policy in one of the policy.d files means that it can be
-#  referenced in multiple places as a *name*, rather than as a series of
-#  conditions to match, and actions to take.
-#
-#  Policies are something like subroutines in a normal language, but
-#  they cannot be called recursively. They MUST be defined in order.
-#  If policy A calls policy B, then B MUST be defined before A.
-#
-######################################################################
-policy {
-	$INCLUDE policy.d/
-}
-
-######################################################################
-#
-#  SNMP notifications.  Uncomment the following line to enable
-#  snmptraps.  Note that you MUST also configure the full path
-#  to the "snmptrap" command in the "trigger.conf" file.
-#
-#$INCLUDE trigger.conf
-
-######################################################################
-#
-#	Load virtual servers.
-#
-#	This next $INCLUDE line loads files in the directory that
-#	match the regular expression: /[a-zA-Z0-9_.]+/
-#
-#	It allows you to define new virtual servers simply by placing
-#	a file into the raddb/sites-enabled/ directory.
-#
-$INCLUDE sites-enabled/
-
-######################################################################
-#
-#	All of the other configuration sections like "authorize {}",
-#	"authenticate {}", "accounting {}", have been moved to the
-#	the file:
-#
-#		raddb/sites-available/default
-#
-#	This is the "default" virtual server that has the same
-#	configuration as in version 1.0.x and 1.1.x.  The default
-#	installation enables this virtual server.  You should
-#	edit it to create policies for your local site.
-#
-#	For more documentation on virtual servers, see:
-#
-#		raddb/sites-available/README
-#
-######################################################################
diff --git a/src/test/setup/radius-config/freeradius/sites-available/README b/src/test/setup/radius-config/freeradius/sites-available/README
deleted file mode 100644
index 55036f0..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/README
+++ /dev/null
@@ -1,335 +0,0 @@
-1.  Virtual Servers.
-
-  FreeRADIUS 2.0 supports virtual servers.  This is probably the
-single largest change that is NOT backwards compatible with 1.x.
-
-  The virtual servers do NOT have to be set up with the
-"sites-available" and "sites-enabled" directories.  You can still have
-one "radiusd.conf" file, and put the server configuration there:
-
-	...
-	server {
-		authorize {
-			...
-		}
-		authenticate {
-			...
-		}
-		...
-	}
-	...
-
-  The power of virtual servers lies in their ability to separate
-policies.  A policy can be placed into a virtual server, where it is
-guaranteed to affect only the requests that are passed through that
-virtual server.  In 1.x, the policies were global, and it sometimes
-took much effort to write a policy so that it only applied in certain
-limited situations.
-
-
-2.  What do we mean by "virtual server"?
-
-
-  A virtual server is a (nearly complete) RADIUS server, just like a
-configuration for FreeRADIUS 1.x.  However, FreeRADIUS can now run
-multiple virtual servers at the same time.  The virtual servers can
-even proxy requests to each other!
-
-  The simplest way to create a virtual server is to take the all of
-the request processing sections from radius.conf, ("authorize" ,
-"authenticate", etc.) and wrap them in a "server {}" block, as above.
-
-  You can create another virtual server by:
-
-    1) defining a new "server foo {...}" section in radiusd.conf
-    2) Putting the normal "authorize", etc. sections inside of it
-    3) Adding a "listen" section *inside* of the "server" section.
-
-  e.g.
-
-	...
-	server foo {
-		listen {
-			ipaddr = 127.0.0.1
-			port = 2000
-			type = auth
-		}
-
-		authorize {
-			update control {
-				Cleartext-Password := "bob"
-			}
-			pap
-		}
-
-		authenticate {
-			pap
-		}
-	}
-	...
-
-  With that text added to "radiusd.conf", run the server in debugging
-mode (radiusd -X), and in another terminal window, type:
-
-$ radtest bob bob localhost:2000 0 testing123
-
-  You should see the server return an Access-Accept.
-
-
-3. Capabilities and limitations
-
-
-  The only sub-sections that can appear in a virtual server section
-are:
-
-	listen
-	client
-	authorize
-	authenticate
-	post-auth
-	pre-proxy
-	post-proxy
-	preacct
-	accounting
-	session
-
-  All other configuration parameters (modules, etc.) are global.
-
-  Inside of a virtual server, the authorize, etc. sections have their
-normal meaning, and can contain anything that an authorize section
-could contain in 1.x.
-
-  When a "listen" section is inside of a virtual server definition, it
-means that all requests sent to that IP/port will be processed through
-the virtual server.  There cannot be two "listen" sections with the
-same IP address and port number.
-
-  When a "client" section is inside of a virtual server definition, it
-means that that client is known only to the "listen" sections that are
-also inside of that virtual server.  Not only is this client
-definition available only to this virtual server, but the details of
-the client configuration is also available only to this virtual
-server.
-
-  i.e. Two virtual servers can listen on different IP address and
-ports, but both can have a client with IP address 127.0.0.1.  The
-shared secret for that client can be different for each virtual
-server.
-
-
-4. More complex "listen" capabilities
-
-  The "listen" sections have a few additional configuration items that
-were not in 1.x, and were not mentioned above.  These configuration
-items enable almost any mapping of IP / port to clients to virtual
-servers.
-
-  The configuration items are:
-
-	virtual_server = <name>
-
-		If set, all requests sent to this IP / port are processed
-		through the named virtual server.
-
-		This directive can be used only for "listen" sections
-		that are global.  i.e. It CANNOT be used if the
-		"listen" section is inside of a virtual server.
-
-	clients = <name>
-
-		If set, the "listen" section looks for a "clients" section:
-
-			clients <name> {
-				...
-			}
-
-		It looks inside of that named "clients" section for
-		"client" subsections, at least one of which must
-		exist.  Each client in that section is added to the
-		list of known clients for this IP / port.  No other
-		clients are known.
-
-		If it is set, it over-rides the list of clients (if
-		any) in the same virtual server.  Note that the
-		clients are NOT additive!
-
-		If it is not set, then the clients from the current
-		virtual server (if any) are used.  If there are no
-		clients in this virtual server, then the global
-		clients are used.
-
-		i.e. The most specific directive is used:
-			* configuration in this "listen" section
-			* clients in the same virtual server
-			* global clients
-
-		The directives are also *exclusive*, not *additive*.
-		If you have one client in a virtual server, and
-		another client referenced from a "listen" section,
-		then that "listen" section will ONLY use the second
-		client.  It will NOT use both clients.
-
-
-5. More complex "client" capabilities
-
-  The "client" sections have a few additional configuration items that
-were not in 1.x, and were not mentioned above.  These configuration
-items enable almost any mapping of IP / port to clients to virtual
-servers.
-
-  The configuration items are:
-
-	virtual_server = <name>
-
-		If set, all requests from this client are processed
-		through the named virtual server.
-
-		This directive can be used only for "client" sections
-		that are global.  i.e. It CANNOT be used if the
-		"client" section is inside of a virtual server.
-
-  If the "listen" section has a "server" entry, and a matching
-client is found ALSO with a "server" entry, then the clients server is
-used for that request.
-
-
-6. Worked examples
-
-
-  Listening on one socket, and mapping requests from two clients to
-two different servers.
-
-	listen {
-		...
-	}
-	client one {
-		...
-		virtual_server = server_one
-	}
-	client two {
-		...
-		virtual_server = server_two
-	}
-	server server_one {
-		authorize {
-			...
-		}
-		...
-	}
-	server server_two {
-		authorize {
-			...
-		}
-		...
-	}
-
-  This could also be done as:
-
-
-	listen {
-		...
-		virtual_server = server_one
-	}
-	client one {
-		...
-	}
-	client two {
-		...
-		virtual_server = server_two
-	}
-	server server_one {
-		authorize {
-			...
-		}
-		...
-	}
-	server server_two {
-		authorize {
-			...
-		}
-		...
-	}
-
-  In this case, the default server for the socket is "server_one", so
-there is no need to set that in the client "one" configuration.  The
-"server_two" configuration for client "two" over-rides the default
-setting for the socket.
-
-  Note that the following configuration will NOT work:
-
-	listen {
-		...
-		virtual_server = server_one
-	}
-	client one {
-		...
-	}
-	server server_one {
-		authorize {
-			...
-		}
-		...
-	}
-	server server_two {
-		client two {
-			...
-		}
-		authorize {
-			...
-		}
-		...
-	}
-
-  In this example, client "two" is hidden inside of the virtual
-server, where the "listen" section cannot find it.
-
-
-7. Outlined examples
-
-  This section outlines a number of examples, with alternatives.
-
-  One server, multiple sockets
-	- multiple "listen" sections in a "server" section
-
-  one server per client
-	- define multiple servers
-	- have a global "listen" section
-	- have multiple global "clients", each with "virtual_server = X"
-
-  two servers, each with their own sockets
-	- define multiple servers
-	- put "client" sections into each "server"
-	- put a "listen" section into each "server"
-
-	Each server can list the same client IP, and the secret
-	can be different
-
-  two sockets, sharing a list of clients, but pointing to different servers
-	- define global "listen" sections
-	- in each, set "virtual_server = X"
-	- in each, set "clients = Y"
-	- define "clients Y" section, containing multiple clients.
-
-	This also means that you can have a third socket, which
-	doesn't share any of these clients.
-
-
-8.  How to decide what to do
-
-
-  If you want *completely* separate policies for a socket or a client,
-then create a separate virtual server.  Then, map the request to that
-server by setting configuration entries in a "listen" section or in a
-"client" section.
-
-  Start off with the common cases first.  If most of the clients
-and/or sockets get a particular policy, make that policy the default.
-Configure it without paying attention to the sockets or clients you
-want to add later, and without adding a second virtual server.  Once
-it works, then add the second virtual server.
-
-  If you want to re-use the previously defined sockets with the second
-virtual server, then you will need one or more global "client"
-sections.  Those clients will contain a "virtual_server = ..." entry
-that will direct requests from those clients to the appropriate
-virtual server.
diff --git a/src/test/setup/radius-config/freeradius/sites-available/buffered-sql b/src/test/setup/radius-config/freeradius/sites-available/buffered-sql
deleted file mode 100644
index 4217d99..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/buffered-sql
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	In 2.0.0, radrelay functionality is integrated into the
-#	server core.  This virtual server gives an example of
-#	using radrelay functionality inside of the server.
-#
-#	In this example, the detail file is read, and the data
-#	is put into SQL.  This configuration is used when a RADIUS
-#	server on this machine is receiving accounting packets,
-#	and writing them to the detail file.
-#
-#	The purpose of this virtual server is to de-couple the storage
-#	of long-term accounting data in SQL from "live" information
-#	needed by the RADIUS server as it is running.
-#
-#	The benefit of this approach is that for a busy server, the
-#	overhead of performing SQL queries may be significant.  Also,
-#	if the SQL databases are large (as is typical for ones storing
-#	months of data), the INSERTs and UPDATEs may take a relatively
-#	long time.  Rather than slowing down the RADIUS server by
-#	having it interact with a database, you can just log the
-#	packets to a detail file, and then read that file later at a
-#	time when the RADIUS server is typically lightly loaded.
-#
-#	If you use on virtual server to log to the detail file,
-#	and another virtual server (i.e. this one) to read from
-#	the detail file, then this process will happen automatically.
-#	A sudden spike of RADIUS traffic means that the detail file
-#	will grow in size, and the server will be able to handle
-#	large volumes of traffic quickly.  When the traffic dies down,
-#	the server will have time to read the detail file, and insert
-#	the data into a long-term SQL database.
-#
-#	$Id: bc5abe8e104accca792de61201c741d07e825894 $
-#
-######################################################################
-
-server buffered-sql {
-	listen {
-		type = detail
-
-		#  The location where the detail file is located.
-		#  This should be on local disk, and NOT on an NFS
-		#  mounted location!
-		filename = "${radacctdir}/detail-*"
-
-		#
-		#  The server can read accounting packets from the
-		#  detail file much more quickly than those packets
-		#  can be written to a database.  If the database is
-		#  overloaded, then bad things can happen.
-		#
-		#  The server will keep track of how long it takes to
-		#  process an entry from the detail file.  It will
-		#  then pause between handling entries.  This pause
-		#  allows databases to "catch up", and gives the
-		#  server time to notice that other packets may have
-		#  arrived.
-		#
-		#  The pause is calculated dynamically, to ensure that
-		#  the load due to reading the detail files is limited
-		#  to a small percentage of CPU time.  The
-		#  "load_factor" configuration item is a number
-		#  between 1 and 100.  The server will try to keep the
-		#  percentage of time taken by "detail" file entries
-		#  to "load_factor" percentage of the CPU time.
-		#
-		#  If the "load_factor" is set to 100, then the server
-		#  will read packets as fast as it can, usually
-		#  causing databases to go into overload.
-		#
-		load_factor = 10
-
-		#
-		#  Set the interval for polling the detail file.
-		#  If the detail file doesn't exist, the server will
-		#  wake up, and poll for it every N seconds.
-		#
-		#  Useful range of values: 1 to 60
-		poll_interval = 1
-
-		#
-		#  Set the retry interval for when the home server
-		#  does not respond.  The current packet will be
-		#  sent repeatedly, at this interval, until the
-		#  home server responds.
-		#
-		#  Useful range of values: 5 to 30
-		retry_interval = 30
-
-	}
-
-	#
-	#  Pre-accounting.  Decide which accounting type to use.
-	#
-	preacct {
-		preprocess
-
-		#
-		#  Ensure that we have a semi-unique identifier for every
-		#  request, and many NAS boxes are broken.
-		acct_unique
-
-		#
-		#  Read the 'acct_users' file.  This isn't always
-		#  necessary, and can be deleted if you do not use it.
-		files
-	}
-
-	#
-	#  Accounting.  Log the accounting data.
-	#
-	accounting {
-		#
-		#  Log traffic to an SQL database.
-		#
-		#  See "Accounting queries" in sql.conf
-	#	sql
-
-
-		#  Cisco VoIP specific bulk accounting
-	#	pgsql-voip
-
-	}
-
-	# The requests are not being proxied, so no pre/post-proxy
-	# sections are necessary.
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls b/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls
deleted file mode 100644
index d84378f..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls
+++ /dev/null
@@ -1,131 +0,0 @@
-# This virtual server allows EAP-TLS to reject access requests
-# based on some certificate attributes.
-#
-# Value-pairs that are available for checking include:
-#
-#   TLS-Client-Cert-Subject
-#   TLS-Client-Cert-Issuer
-#   TLS-Client-Cert-Common-Name
-#   TLS-Client-Cert-Subject-Alt-Name-Email
-#
-# To see a full list of attributes, run the server in debug mode
-# with this virtual server configured, and look at the attributes
-# passed in to this virtual server.
-#
-#
-# This virtual server is also useful when using EAP-TLS as it is only called
-# once, just before the final Accept is about to be returned from eap, whereas
-# the outer authorize section is called multiple times for each challenge /
-# response. For this reason, here may be a good location to put authentication
-# logging, and modules that check for further authorization, especially if they
-# hit external services such as sql or ldap.
-
-server check-eap-tls {
-
-
-# Authorize - this is the only section required.
-#
-# To accept the access request, set Auth-Type = Accept, otherwise
-# set it to Reject.
-
-authorize {
-
-	#
-	# By default, we just accept the request:
-	#
-	update config {
-		Auth-Type := Accept
-	}
-
-
-	#
-	# Check the client certificate matches a string, and reject otherwise
-	#
-
-#	if ("%{TLS-Client-Cert-Common-Name}" == "client.example.com") {
-#		update config {
-#			Auth-Type := Accept
-#		}
-#	}
-#	else {
-#		update config {
-#			Auth-Type := Reject
-#		}
-#		update reply {
-#			Reply-Message := "Your certificate is not valid."
-#		}
-#	}
-
-
-	#
-	# Check the client certificate common name against the supplied User-Name
-	#
-#	if ("host/%{TLS-Client-Cert-Common-Name}" == "%{User-Name}") {
-#		update config {
-#			Auth-Type := Accept
-#		}
-#	}
-#	else {
-#		update config {
-#			Auth-Type := Reject
-#		}
-#	}
-
-
-	#
-	# This is a convenient place to call LDAP, for example, when using
-	# EAP-TLS, as it will only be called once, after all certificates as
-	# part of the EAP-TLS challenge process have been verified.
-	#
-	# An example could be to use LDAP to check that the connecting host, as
-	# well as presenting a valid certificate, is also in a group based on
-	# the User-Name (assuming this contains the service principal name).
-	# Settings such as the following could be used in the ldap module
-	# configuration:
-	#
-	# basedn = "dc=example, dc=com"
-	# filter = "(servicePrincipalName=%{User-Name})"
-	# base_filter = "(objectClass=computer)"
-	# groupname_attribute = cn
-	# groupmembership_filter = "(&(objectClass=group)(member=%{control:Ldap-UserDn}))"
-
-#	ldap
-
-	# Now let's test membership of an LDAP group (the ldap bind user will
-	# need permission to read this group membership):
-
-#	if (!(Ldap-Group == "Permitted-Laptops")) {
-#		update config {
-#			Auth-Type := Reject
-#		}
-#	}
-
-	# or, to be more specific, you could use the group's full DN:
-	# if (!(Ldap-Group == "CN=Permitted-Laptops,OU=Groups,DC=example,DC=org")) {
-
-
-	#
-	# This may be a better place to call the files modules when using
-	# EAP-TLS, as it will only be called once, after the challenge-response
-	# iteration has completed.
-	#
-
-#	files
-
-
-	#
-	# Log all request attributes, plus TLS certificate details, to the
-	# detail auth_log. Again, this is just once per connection request, so
-	# may be preferable than in the outer authorize section. It is
-	# suggested that 'auth_log' also be in the outer post-auth and
-	# Post-Auth REJECT sections to log reply packet details, too.
-	#
-
-	auth_log
-
-}
-
-
-
-}
-
diff --git a/src/test/setup/radius-config/freeradius/sites-available/coa b/src/test/setup/radius-config/freeradius/sites-available/coa
deleted file mode 100644
index 66caa31..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/coa
+++ /dev/null
@@ -1,49 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#  Sample virtual server for receiving a CoA or Disconnect-Request packet.
-#
-
-#  Listen on the CoA port.
-#
-#  This uses the normal set of clients, with the same secret as for
-#  authentication and accounting.
-#
-listen {
-	type = coa
-	ipaddr = *
-	port = 3799
-	server = coa
-}
-
-server coa {
-	#  When a packet is received, it is processed through the
-	#  recv-coa section.  This applies to *both* CoA-Request and
-	#  Disconnect-Request packets.
-	recv-coa {
-		#  CoA && Disconnect packets can be proxied in the same
-		#  way as authentication or accounting packets.
-		#  Just set Proxy-To-Realm, or Home-Server-Pool, and the
-		#  packets will be proxied.
-
-		#  Do proxying based on realms here.  You don't need
-		#  "IPASS" or "ntdomain", as the proxying is based on
-		#  the Operator-Name attribute.  It contains the realm,
-		#  and ONLY the realm (prefixed by a '1')
-		suffix
-
-		#  Insert your own policies here.
-		ok
-	}
-
-	#  When a packet is sent, it is processed through the
-	#  recv-coa section.  This applies to *both* CoA-Request and
-	#  Disconnect-Request packets.
-	send-coa {
-		#  Sample module.
-		ok
-	}
-
-	#  You can use pre-proxy and post-proxy sections here, too.
-	#  They will be processed for sending && receiving proxy packets.
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/control-socket b/src/test/setup/radius-config/freeradius/sites-available/control-socket
deleted file mode 100644
index c3f813d..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/control-socket
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	Control socket interface.
-#
-#	In the future, we will add username/password checking for
-#	connections to the control socket.  We will also add
-#	command authorization, where the commands entered by the
-#	administrator are run through a virtual server before
-#	they are executed.
-#
-#	For now, anyone who has permission to connect to the socket
-#	has nearly complete control over the server.  Be warned!
-#
-#	This functionality is NOT enabled by default.
-#
-#	See also the "radmin" program, which is used to communicate
-#	with the server over the control socket.
-#
-#	$Id: 8d06082d3a8fba31bb1471aef19e28093cee4a9e $
-#
-######################################################################
-listen {
-	#
-	#  Listen on the control socket.
-	#
-	type = control
-
-	#
-	#  Socket location.
-	#
-	#  This file is created with the server's uid and gid.
-	#  It's permissions are r/w for that user and group, and
-	#  no permissions for "other" users.  These permissions form
-	#  minimal security, and should not be relied on.
-	#
-	socket = ${run_dir}/${name}.sock
-
-	#
-	#  The following two parameters perform authentication and
-	#  authorization of connections to the control socket.
-	#
-	#  If not set, then ANYONE can connect to the control socket,
-	#  and have complete control over the server.  This is likely
-	#  not what you want.
-	#
-	#  One, or both, of "uid" and "gid" should be set.  If set, the
-	#  corresponding value is checked.  Unauthorized users result
-	#  in an error message in the log file, and the connection is
-	#  closed.
-	#
-
-	#
-	#  Name of user that is allowed to connect to the control socket.
-	#
-#	uid = radius
-
-	#
-	#  Name of group that is allowed to connect to the control socket.
-	#
-#	gid = radius
-
-	#
-	#  Access mode.
-	#
-	#  This can be used to give *some* administrators access to
-	#  monitor the system, but not to change it.
-	#
-	#	ro = read only access (default)
-	#	rw = read/write access.
-	#
-#	mode = rw
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server b/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server
deleted file mode 100644
index 5f962f8..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server
+++ /dev/null
@@ -1,169 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	In 2.0.0, radrelay functionality is integrated into the
-#	server core.  This virtual server gives an example of
-#	using radrelay functionality inside of the server.
-#
-#	In this example, the detail file is read, and the packets
-#	are proxied to a home server.  You will have to configure
-#	realms, home_server_pool, and home_server in proxy.conf
-#	for this to work.
-#
-#	The purpose of this virtual server is to enable duplication
-#	of information across a load-balanced, or fail-over set of
-#	servers.  For example, if a group of clients lists two
-#	home servers (primary, secondary), then RADIUS accounting
-#	messages will go only to one server at a time.  This file
-#	configures a server (primary, secondary) to send copies of
-#	the accounting information to each other.
-#
-#	That way, each server has the same set of information, and
-#	can make the same decision about the user.
-#
-#	$Id: 2869287260929f35d1a575b52014de20ce6cf3bb $
-#
-######################################################################
-
-server copy-acct-to-home-server {
-	listen {
-		type = detail
-
-		######################################################
-		#
-		#  !!!! WARNING !!!!
-		#
-		#  The detail file reader acts just like a NAS.
-		#
-		#  This means that if accounting fails, the packet
-		#  is re-tried FOREVER.  It is YOUR responsibility
-		#  to write an accounting policy that returns "ok"
-		#  if the packet was processed properly, "fail" on
-		#  a database error, AND "ok" if you want to ignore
-		#  the packet (e.g. no Acct-Status-Type).
-		#
-		#  Neither the detail file write OR the detail file
-		#  reader look at the contents of the packets.  They
-		#  just either dump the packet verbatim to the file,
-		#  or read it verbatim from the file and pass it to
-		#  the server.
-		#
-		######################################################
-
-
-		#  The location where the detail file is located.
-		#  This should be on local disk, and NOT on an NFS
-		#  mounted location!
-		#
-		#  On most systems, this should support file globbing
-		#  e.g. "${radacctdir}/detail-*:*"
-		#  This lets you write many smaller detail files as in
-		#  the example in radiusd.conf: ".../detail-%Y%m%d:%H"
-		#  Writing many small files is often better than writing
-		#  one large file.  File globbing also means that with
-		#  a common naming scheme for detail files, then you can
-		#  have many detail file writers, and only one reader.
-		filename = ${radacctdir}/detail
-
-		#
-		#  The server can read accounting packets from the
-		#  detail file much more quickly than those packets
-		#  can be written to a database.  If the database is
-		#  overloaded, then bad things can happen.
-		#
-		#  The server will keep track of how long it takes to
-		#  process an entry from the detail file.  It will
-		#  then pause between handling entries.  This pause
-		#  allows databases to "catch up", and gives the
-		#  server time to notice that other packets may have
-		#  arrived.
-		#
-		#  The pause is calculated dynamically, to ensure that
-		#  the load due to reading the detail files is limited
-		#  to a small percentage of CPU time.  The
-		#  "load_factor" configuration item is a number
-		#  between 1 and 100.  The server will try to keep the
-		#  percentage of time taken by "detail" file entries
-		#  to "load_factor" percentage of the CPU time.
-		#
-		#  If the "load_factor" is set to 100, then the server
-		#  will read packets as fast as it can, usually
-		#  causing databases to go into overload.
-		#
-		load_factor = 10
-	}
-
-	#
-	#  Pre-accounting.  Decide which accounting type to use.
-	#
-	preacct {
-		preprocess
-
-		# Since we're just proxying, we don't need acct_unique.
-
-		#
-		#  Look for IPASS-style 'realm/', and if not found, look for
-		#  '@realm', and decide whether or not to proxy, based on
-		#  that.
-		#
-		#  Accounting requests are generally proxied to the same
-		#  home server as authentication requests.
-	#	IPASS
-		suffix
-	#	ntdomain
-
-		#
-		#  Read the 'acct_users' file.  This isn't always
-		#  necessary, and can be deleted if you do not use it.
-		files
-	}
-
-	#
-	#  Accounting.  Log the accounting data.
-	#
-	accounting {
-		   #
-		   # Since we're proxying, we don't log anything
-		   # locally.  Ensure that the accounting section
-		   # "succeeds" by forcing an "ok" return.
-		   ok
-	}
-
-
-	#
-	#  When the server decides to proxy a request to a home server,
-	#  the proxied request is first passed through the pre-proxy
-	#  stage.  This stage can re-write the request, or decide to
-	#  cancel the proxy.
-	#
-	#  Only a few modules currently have this method.
-	#
-	pre-proxy {
-
-		#  If you want to have a log of packets proxied to a home
-		#  server, un-comment the following line, and the
-		#  'detail pre_proxy_log' section in radiusd.conf.
-	#	pre_proxy_log
-	}
-
-	#
-	#  When the server receives a reply to a request it proxied
-	#  to a home server, the request may be massaged here, in the
-	#  post-proxy stage.
-	#
-	post-proxy {
-		#
-
-		#  If you want to have a log of replies from a home
-		#  server, un-comment the following line, and the
-		#  'detail post_proxy_log' section in radiusd.conf.
-	#	post_proxy_log
-
-
-		#  Uncomment the following line if you want to filter
-		#  replies from remote proxies based on the rules
-		#  defined in the 'attrs' file.
-
-	#	attr_filter
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting b/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting
deleted file mode 100644
index 199258d..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a sample configuration for "decoupled" accounting.
-#	"Decoupled" accounting is where the accounting packets are
-#	NOT written "live" to the back-end database.  This method
-#	can only be used if you are not interested in "live"
-#	accounting.  i.e. Where you can tolerate delays that may be
-#	a few seconds, before accounting packets get written to
-#	the DB.
-#
-#	Oddly enough, this method can speed up the processing of
-#	accounting packets, as all database activity is serialized.
-#
-#	This file is NOT meant to be used as-is.  It needs to be
-#	edited to match your local configuration.
-#
-#	$Id$
-#
-######################################################################
-
-#  Define a virtual server to write the accounting packets.
-#  Any "listen" section that listens on an accounting port should
-#  set "virtual_server = write-detail.example.com
-server write_detail.example.com {
-	accounting {
-		#
-		#  Write the "detail" files.
-		#
-		#  See raddb/modules/detail.example.com for more info.
-		detail.example.com
-	}
-
-	# That's it!
-}
-
-#  Define a virtual server to process the accounting packets.
-server read-detail.example.com {
-	#  Read accounting packets from the detail file(s) for
-	#  the home server.
-	listen {
-		type = detail
-		filename = "${radacctdir}/detail.example.com/detail-*:*"
-		load_factor = 10
-	}
-
-	#  All packets read from the detail file are processed through
-	#  the preacct && accounting sections.
-	#
-	#  The following text is copied verbatim from sites-available/default.
-	#  You should edit it for your own local configuration.
-
-#
-#  Pre-accounting.  Decide which accounting type to use.
-#
-preacct {
-	preprocess
-
-	#
-	#  Ensure that we have a semi-unique identifier for every
-	#  request, and many NAS boxes are broken.
-	acct_unique
-
-	#
-	#  Look for IPASS-style 'realm/', and if not found, look for
-	#  '@realm', and decide whether or not to proxy, based on
-	#  that.
-	#
-	#  Accounting requests are generally proxied to the same
-	#  home server as authentication requests.
-#	IPASS
-	suffix
-#	ntdomain
-
-	#
-	#  Read the 'acct_users' file
-	files
-}
-
-#
-#  Accounting.  Log the accounting data.
-#
-accounting {
-	#
-	#  Create a 'detail'ed log of the packets.
-	#  Note that accounting requests which are proxied
-	#  are also logged in the detail file.
-	detail
-#	daily
-
-	#  Update the wtmp file
-	#
-	#  If you don't use "radlast", you can delete this line.
-	unix
-
-	#
-	#  For Simultaneous-Use tracking.
-	#
-	#  Due to packet losses in the network, the data here
-	#  may be incorrect.  There is little we can do about it.
-	radutmp
-#	sradutmp
-
-	#  Return an address to the IP Pool when we see a stop record.
-#	main_pool
-
-	#
-	#  Log traffic to an SQL database.
-	#
-	#  NOTE! You will have to ensure that any accounting packets
-	#  NOT handled by the SQL module (e.g. "stop with zero session length"
-	#  result in the accounting section still returning "ok".
-	#
-	#  Otherwise, the server will think that the accounting packet
-	#  was NOT handled properly, and will keep trying to process it
-	#  through this virtual server!
-	#
-	#  See "Accounting queries" in sql.conf
-#	sql
-
-	#
-	#  Instead of sending the query to the SQL server,
-	#  write it into a log file.
-	#
-#	sql_log
-
-	#  Cisco VoIP specific bulk accounting
-#	pgsql-voip
-
-	#  Filter attributes from the accounting response.
-	attr_filter.accounting_response
-
-	#
-	#  See "Autz-Type Status-Server" for how this works.
-	#
-#	Acct-Type Status-Server {
-#
-#	}
-}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/default b/src/test/setup/radius-config/freeradius/sites-available/default
deleted file mode 100644
index 934f835..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/default
+++ /dev/null
@@ -1,844 +0,0 @@
-######################################################################
-#
-#	As of 2.0.0, FreeRADIUS supports virtual hosts using the
-#	"server" section, and configuration directives.
-#
-#	Virtual hosts should be put into the "sites-available"
-#	directory.  Soft links should be created in the "sites-enabled"
-#	directory to these files.  This is done in a normal installation.
-#
-#	If you are using 802.1X (EAP) authentication, please see also
-#	the "inner-tunnel" virtual server.  You will likely have to edit
-#	that, too, for authentication to work.
-#
-#	$Id: 3278975e054fab504afda5ba8fc999239cb2fb9d $
-#
-######################################################################
-#
-#	Read "man radiusd" before editing this file.  See the section
-#	titled DEBUGGING.  It outlines a method where you can quickly
-#	obtain the configuration you want, without running into
-#	trouble.  See also "man unlang", which documents the format
-#	of this file.
-#
-#	This configuration is designed to work in the widest possible
-#	set of circumstances, with the widest possible number of
-#	authentication methods.  This means that in general, you should
-#	need to make very few changes to this file.
-#
-#	The best way to configure the server for your local system
-#	is to CAREFULLY edit this file.  Most attempts to make large
-#	edits to this file will BREAK THE SERVER.  Any edits should
-#	be small, and tested by running the server with "radiusd -X".
-#	Once the edits have been verified to work, save a copy of these
-#	configuration files somewhere.  (e.g. as a "tar" file).  Then,
-#	make more edits, and test, as above.
-#
-#	There are many "commented out" references to modules such
-#	as ldap, sql, etc.  These references serve as place-holders.
-#	If you need the functionality of that module, then configure
-#	it in radiusd.conf, and un-comment the references to it in
-#	this file.  In most cases, those small changes will result
-#	in the server being able to connect to the DB, and to
-#	authenticate users.
-#
-######################################################################
-
-server default {
-#
-#  If you want the server to listen on additional addresses, or on
-#  additional ports, you can use multiple "listen" sections.
-#
-#  Each section make the server listen for only one type of packet,
-#  therefore authentication and accounting have to be configured in
-#  different sections.
-#
-#  The server ignore all "listen" section if you are using '-i' and '-p'
-#  on the command line.
-#
-listen {
-	#  Type of packets to listen for.
-	#  Allowed values are:
-	#	auth	listen for authentication packets
-	#	acct	listen for accounting packets
-	#	proxy   IP to use for sending proxied packets
-	#	detail  Read from the detail file.  For examples, see
-	#               raddb/sites-available/copy-acct-to-home-server
-	#	status  listen for Status-Server packets.  For examples,
-	#		see raddb/sites-available/status
-	#	coa     listen for CoA-Request and Disconnect-Request
-	#		packets.  For examples, see the file
-	#		raddb/sites-available/coa-server
-	#
-	type = auth
-
-	#  Note: "type = proxy" lets you control the source IP used for
-	#        proxying packets, with some limitations:
-	#
-	#    * A proxy listener CANNOT be used in a virtual server section.
-	#    * You should probably set "port = 0".
-	#    * Any "clients" configuration will be ignored.
-	#
-	#  See also proxy.conf, and the "src_ipaddr" configuration entry
-	#  in the sample "home_server" section.  When you specify the
-	#  source IP address for packets sent to a home server, the
-	#  proxy listeners are automatically created.
-
-	#  IP address on which to listen.
-	#  Allowed values are:
-	#	dotted quad (1.2.3.4)
-	#       hostname    (radius.example.com)
-	#       wildcard    (*)
-	ipaddr = *
-
-	#  OR, you can use an IPv6 address, but not both
-	#  at the same time.
-#	ipv6addr = ::	# any.  ::1 == localhost
-
-	#  Port on which to listen.
-	#  Allowed values are:
-	#	integer port number (1812)
-	#	0 means "use /etc/services for the proper port"
-	port = 0
-
-	#  Some systems support binding to an interface, in addition
-	#  to the IP address.  This feature isn't strictly necessary,
-	#  but for sites with many IP addresses on one interface,
-	#  it's useful to say "listen on all addresses for eth0".
-	#
-	#  If your system does not support this feature, you will
-	#  get an error if you try to use it.
-	#
-#	interface = eth0
-
-	#  Per-socket lists of clients.  This is a very useful feature.
-	#
-	#  The name here is a reference to a section elsewhere in
-	#  radiusd.conf, or clients.conf.  Having the name as
-	#  a reference allows multiple sockets to use the same
-	#  set of clients.
-	#
-	#  If this configuration is used, then the global list of clients
-	#  is IGNORED for this "listen" section.  Take care configuring
-	#  this feature, to ensure you don't accidentally disable a
-	#  client you need.
-	#
-	#  See clients.conf for the configuration of "per_socket_clients".
-	#
-#	clients = per_socket_clients
-
-	#
-	#  Connection limiting for sockets with "proto = tcp".
-	#
-	#  This section is ignored for other kinds of sockets.
-	#
-	limit {
-	      #
-	      #  Limit the number of simultaneous TCP connections to the socket
-	      #
-	      #  The default is 16.
-	      #  Setting this to 0 means "no limit"
-	      max_connections = 16
-
-	      #  The per-socket "max_requests" option does not exist.
-
-	      #
-	      #  The lifetime, in seconds, of a TCP connection.  After
-	      #  this lifetime, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "forever".
-	      lifetime = 0
-
-	      #
-	      #  The idle timeout, in seconds, of a TCP connection.
-	      #  If no packets have been received over the connection for
-	      #  this time, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "no timeout".
-	      #
-	      #  We STRONGLY RECOMMEND that you set an idle timeout.
-	      #
-	      idle_timeout = 30
-	}
-}
-
-#
-#  This second "listen" section is for listening on the accounting
-#  port, too.
-#
-listen {
-	ipaddr = *
-#	ipv6addr = ::
-	port = 0
-	type = acct
-#	interface = eth0
-#	clients = per_socket_clients
-
-	limit {
-		#  The number of packets received can be rate limited via the
-		#  "max_pps" configuration item.  When it is set, the server
-		#  tracks the total number of packets received in the previous
-		#  second.  If the count is greater than "max_pps", then the
-		#  new packet is silently discarded.  This helps the server
-		#  deal with overload situations.
-		#
-		#  The packets/s counter is tracked in a sliding window.  This
-		#  means that the pps calculation is done for the second
-		#  before the current packet was received.  NOT for the current
-		#  wall-clock second, and NOT for the previous wall-clock second.
-		#
-		#  Useful values are 0 (no limit), or 100 to 10000.
-		#  Values lower than 100 will likely cause the server to ignore
-		#  normal traffic.  Few systems are capable of handling more than
-		#  10K packets/s.
-		#
-		#  It is most useful for accounting systems.  Set it to 50%
-		#  more than the normal accounting load, and you can be sure that
-		#  the server will never get overloaded
-		#
-#		max_pps = 0
-
-		# Only for "proto = tcp". These are ignored for "udp" sockets.
-		#
-#		idle_timeout = 0
-#		lifetime = 0
-#		max_connections = 0
-	}
-}
-
-#  Authorization. First preprocess (hints and huntgroups files),
-#  then realms, and finally look in the "users" file.
-#
-#  Any changes made here should also be made to the "inner-tunnel"
-#  virtual server.
-#
-#  The order of the realm modules will determine the order that
-#  we try to find a matching realm.
-#
-#  Make *sure* that 'preprocess' comes before any realm if you
-#  need to setup hints for the remote radius server
-authorize {
-	#
-	#  Take a User-Name, and perform some checks on it, for spaces and other
-	#  invalid characters.  If the User-Name appears invalid, reject the
-	#  request.
-	#
-	#  See policy.d/filter for the definition of the filter_username policy.
-	#
-	filter_username
-
-	#
-	#  The preprocess module takes care of sanitizing some bizarre
-	#  attributes in the request, and turning them into attributes
-	#  which are more standard.
-	#
-	#  It takes care of processing the 'raddb/hints' and the
-	#  'raddb/huntgroups' files.
-	preprocess
-
-	#  If you intend to use CUI and you require that the Operator-Name
-	#  be set for CUI generation and you want to generate CUI also
-	#  for your local clients then uncomment the operator-name
-	#  below and set the operator-name for your clients in clients.conf
-#       operator-name
-
-	#
-	#  If you want to generate CUI for some clients that do not
-	#  send proper CUI requests, then uncomment the
-	#  cui below and set "add_cui = yes" for these clients in clients.conf
-#       cui
-
-	#
-	#  If you want to have a log of authentication requests,
-	#  un-comment the following line, and the 'detail auth_log'
-	#  section, above.
-#	auth_log
-
-	#
-	#  The chap module will set 'Auth-Type := CHAP' if we are
-	#  handling a CHAP request and Auth-Type has not already been set
-	#chap
-
-	#
-	#  If the users are logging in with an MS-CHAP-Challenge
-	#  attribute for authentication, the mschap module will find
-	#  the MS-CHAP-Challenge attribute, and add 'Auth-Type := MS-CHAP'
-	#  to the request, which will cause the server to then use
-	#  the mschap module for authentication.
-	#mschap
-
-	#
-	#  If you have a Cisco SIP server authenticating against
-	#  FreeRADIUS, uncomment the following line, and the 'digest'
-	#  line in the 'authenticate' section.
-	#digest
-
-	#
-	#  The WiMAX specification says that the Calling-Station-Id
-	#  is 6 octets of the MAC.  This definition conflicts with
-	#  RFC 3580, and all common RADIUS practices.  Un-commenting
-	#  the "wimax" module here means that it will fix the
-	#  Calling-Station-Id attribute to the normal format as
-	#  specified in RFC 3580 Section 3.21
-#	wimax
-
-	#
-	#  Look for IPASS style 'realm/', and if not found, look for
-	#  '@realm', and decide whether or not to proxy, based on
-	#  that.
-#	IPASS
-
-	#
-	#  If you are using multiple kinds of realms, you probably
-	#  want to set "ignore_null = yes" for all of them.
-	#  Otherwise, when the first style of realm doesn't match,
-	#  the other styles won't be checked.
-	#
-	suffix
-#	ntdomain
-
-	#
-	#  This module takes care of EAP-MD5, EAP-TLS, and EAP-LEAP
-	#  authentication.
-	#
-	#  It also sets the EAP-Type attribute in the request
-	#  attribute list to the EAP type from the packet.
-	#
-	#  As of 2.0, the EAP module returns "ok" in the authorize stage
-	#  for TTLS and PEAP.  In 1.x, it never returned "ok" here, so
-	#  this change is compatible with older configurations.
-	#
-	#  The example below uses module failover to avoid querying all
-	#  of the following modules if the EAP module returns "ok".
-	#  Therefore, your LDAP and/or SQL servers will not be queried
-	#  for the many packets that go back and forth to set up TTLS
-	#  or PEAP.  The load on those servers will therefore be reduced.
-	#
-	eap {
-		ok = return
-	}
-
-	#
-	#  Pull crypt'd passwords from /etc/passwd or /etc/shadow,
-	#  using the system API's to get the password.  If you want
-	#  to read /etc/passwd or /etc/shadow directly, see the
-	#  passwd module in radiusd.conf.
-	#
-#	unix
-
-	#
-	#  Read the 'users' file
-	files
-
-	#
-	#  Look in an SQL database.  The schema of the database
-	#  is meant to mirror the "users" file.
-	#
-	#  See "Authorization Queries" in sql.conf
-	-sql
-
-	#
-	#  If you are using /etc/smbpasswd, and are also doing
-	#  mschap authentication, the un-comment this line, and
-	#  configure the 'smbpasswd' module.
-#	smbpasswd
-
-	#
-	#  The ldap module reads passwords from the LDAP database.
-	-ldap
-
-	#
-	#  Enforce daily limits on time spent logged in.
-#	daily
-
-	#
-	expiration
-	logintime
-
-	#
-	#  If no other module has claimed responsibility for
-	#  authentication, then try to use PAP.  This allows the
-	#  other modules listed above to add a "known good" password
-	#  to the request, and to do nothing else.  The PAP module
-	#  will then see that password, and use it to do PAP
-	#  authentication.
-	#
-	#  This module should be listed last, so that the other modules
-	#  get a chance to set Auth-Type for themselves.
-	#
-	pap
-
-	#
-	#  If "status_server = yes", then Status-Server messages are passed
-	#  through the following section, and ONLY the following section.
-	#  This permits you to do DB queries, for example.  If the modules
-	#  listed here return "fail", then NO response is sent.
-	#
-#	Autz-Type Status-Server {
-#
-#	}
-}
-
-
-#  Authentication.
-#
-#
-#  This section lists which modules are available for authentication.
-#  Note that it does NOT mean 'try each module in order'.  It means
-#  that a module from the 'authorize' section adds a configuration
-#  attribute 'Auth-Type := FOO'.  That authentication type is then
-#  used to pick the appropriate module from the list below.
-#
-
-#  In general, you SHOULD NOT set the Auth-Type attribute.  The server
-#  will figure it out on its own, and will do the right thing.  The
-#  most common side effect of erroneously setting the Auth-Type
-#  attribute is that one authentication method will work, but the
-#  others will not.
-#
-#  The common reasons to set the Auth-Type attribute by hand
-#  is to either forcibly reject the user (Auth-Type := Reject),
-#  or to or forcibly accept the user (Auth-Type := Accept).
-#
-#  Note that Auth-Type := Accept will NOT work with EAP.
-#
-#  Please do not put "unlang" configurations into the "authenticate"
-#  section.  Put them in the "post-auth" section instead.  That's what
-#  the post-auth section is for.
-#
-authenticate {
-	#
-	#  PAP authentication, when a back-end database listed
-	#  in the 'authorize' section supplies a password.  The
-	#  password can be clear-text, or encrypted.
-	Auth-Type PAP {
-		pap
-	}
-
-	#
-	#  Most people want CHAP authentication
-	#  A back-end database listed in the 'authorize' section
-	#  MUST supply a CLEAR TEXT password.  Encrypted passwords
-	#  won't work.
-	#Auth-Type CHAP {
-#
-#		chap
-#	}
-
-	#
-	#  MSCHAP authentication.
-#	Auth-Type MS-CHAP {
-#		mschap
-#	}
-
-	#
-	#  If you have a Cisco SIP server authenticating against
-	#  FreeRADIUS, uncomment the following line, and the 'digest'
-	#  line in the 'authorize' section.
-#	digest
-
-	#
-	#  Pluggable Authentication Modules.
-#	pam
-
-	#  Uncomment it if you want to use ldap for authentication
-	#
-	#  Note that this means "check plain-text password against
-	#  the ldap database", which means that EAP won't work,
-	#  as it does not supply a plain-text password.
-	#
-	#  We do NOT recommend using this.  LDAP servers are databases.
-	#  They are NOT authentication servers.  FreeRADIUS is an
-	#  authentication server, and knows what to do with authentication.
-	#  LDAP servers do not.
-	#
-#	Auth-Type LDAP {
-#		ldap
-#	}
-
-	#
-	#  Allow EAP authentication.
-	eap
-
-	#
-	#  The older configurations sent a number of attributes in
-	#  Access-Challenge packets, which wasn't strictly correct.
-	#  If you want to filter out these attributes, uncomment
-	#  the following lines.
-	#
-#	Auth-Type eap {
-#		eap {
-#			handled = 1
-#		}
-#		if (handled && (Response-Packet-Type == Access-Challenge)) {
-#			attr_filter.access_challenge.post-auth
-#			handled  # override the "updated" code from attr_filter
-#		}
-#	}
-}
-
-
-#
-#  Pre-accounting.  Decide which accounting type to use.
-#
-preacct {
-	preprocess
-
-	#
-	#  Merge Acct-[Input|Output]-Gigawords and Acct-[Input-Output]-Octets
-	#  into a single 64bit counter Acct-[Input|Output]-Octets64.
-	#
-#	acct_counters64
-
-	#
-	#  Session start times are *implied* in RADIUS.
-	#  The NAS never sends a "start time".  Instead, it sends
-	#  a start packet, *possibly* with an Acct-Delay-Time.
-	#  The server is supposed to conclude that the start time
-	#  was "Acct-Delay-Time" seconds in the past.
-	#
-	#  The code below creates an explicit start time, which can
-	#  then be used in other modules.  It will be *mostly* correct.
-	#  Any errors are due to the 1-second resolution of RADIUS,
-	#  and the possibility that the time on the NAS may be off.
-	#
-	#  The start time is: NOW - delay - session_length
-	#
-
-#	update request {
-#	  	FreeRADIUS-Acct-Session-Start-Time = "%{expr: %l - %{%{Acct-Session-Time}:-0} - %{%{Acct-Delay-Time}:-0}}"
-#	}
-
-
-	#
-	#  Ensure that we have a semi-unique identifier for every
-	#  request, and many NAS boxes are broken.
-	acct_unique
-
-	#
-	#  Look for IPASS-style 'realm/', and if not found, look for
-	#  '@realm', and decide whether or not to proxy, based on
-	#  that.
-	#
-	#  Accounting requests are generally proxied to the same
-	#  home server as authentication requests.
-#	IPASS
-	suffix
-#	ntdomain
-
-	#
-	#  Read the 'acct_users' file
-	files
-}
-
-#
-#  Accounting.  Log the accounting data.
-#
-accounting {
-	#  Update accounting packet by adding the CUI attribute
-	#  recorded from the corresponding Access-Accept
-	#  use it only if your NAS boxes do not support CUI themselves
-#       cui
-	#
-	#  Create a 'detail'ed log of the packets.
-	#  Note that accounting requests which are proxied
-	#  are also logged in the detail file.
-	detail
-#	daily
-
-	#  Update the wtmp file
-	#
-	#  If you don't use "radlast", you can delete this line.
-	unix
-
-	#
-	#  For Simultaneous-Use tracking.
-	#
-	#  Due to packet losses in the network, the data here
-	#  may be incorrect.  There is little we can do about it.
-#	radutmp
-#	sradutmp
-
-	#  Return an address to the IP Pool when we see a stop record.
-#	main_pool
-
-	#
-	#  Log traffic to an SQL database.
-	#
-	#  See "Accounting queries" in sql.conf
-	-sql
-
-	#
-	#  If you receive stop packets with zero session length,
-	#  they will NOT be logged in the database.  The SQL module
-	#  will print a message (only in debugging mode), and will
-	#  return "noop".
-	#
-	#  You can ignore these packets by uncommenting the following
-	#  three lines.  Otherwise, the server will not respond to the
-	#  accounting request, and the NAS will retransmit.
-	#
-#	if (noop) {
-#		ok
-#	}
-
-	#
-	#  Instead of sending the query to the SQL server,
-	#  write it into a log file.
-	#
-#	sql_log
-
-	#  Cisco VoIP specific bulk accounting
-#	pgsql-voip
-
-	# For Exec-Program and Exec-Program-Wait
-	exec
-
-	#  Filter attributes from the accounting response.
-	attr_filter.accounting_response
-
-	#
-	#  See "Autz-Type Status-Server" for how this works.
-	#
-#	Acct-Type Status-Server {
-#
-#	}
-}
-
-
-#  Session database, used for checking Simultaneous-Use. Either the radutmp
-#  or rlm_sql module can handle this.
-#  The rlm_sql module is *much* faster
-session {
-#	radutmp
-
-	#
-	#  See "Simultaneous Use Checking Queries" in sql.conf
-#	sql
-}
-
-
-#  Post-Authentication
-#  Once we KNOW that the user has been authenticated, there are
-#  additional steps we can take.
-post-auth {
-	#  Get an address from the IP Pool.
-#	main_pool
-
-
-	#  Create the CUI value and add the attribute to Access-Accept.
-	#  Uncomment the line below if *returning* the CUI.
-#       cui
-
-	#
-	#  If you want to have a log of authentication replies,
-	#  un-comment the following line, and enable the
-	#  'detail reply_log' module.
-#	reply_log
-
-	#
-	#  After authenticating the user, do another SQL query.
-	#
-	#  See "Authentication Logging Queries" in sql.conf
-	-sql
-
-	#
-	#  Instead of sending the query to the SQL server,
-	#  write it into a log file.
-	#
-#	sql_log
-
-	#
-	#  Un-comment the following if you want to modify the user's object
-	#  in LDAP after a successful login.
-	#
-#	ldap
-
-	# For Exec-Program and Exec-Program-Wait
-	exec
-
-	#
-	#  Calculate the various WiMAX keys.  In order for this to work,
-	#  you will need to define the WiMAX NAI, usually via
-	#
-	#	update request {
-	#	       WiMAX-MN-NAI = "%{User-Name}"
-	#	}
-	#
-	#  If you want various keys to be calculated, you will need to
-	#  update the reply with "template" values.  The module will see
-	#  this, and replace the template values with the correct ones
-	#  taken from the cryptographic calculations.  e.g.
-	#
-	# 	update reply {
-	#		WiMAX-FA-RK-Key = 0x00
-	#		WiMAX-MSK = "%{EAP-MSK}"
-	#	}
-	#
-	#  You may want to delete the MS-MPPE-*-Keys from the reply,
-	#  as some WiMAX clients behave badly when those attributes
-	#  are included.  See "raddb/modules/wimax", configuration
-	#  entry "delete_mppe_keys" for more information.
-	#
-#	wimax
-
-
-	#  If there is a client certificate (EAP-TLS, sometimes PEAP
-	#  and TTLS), then some attributes are filled out after the
-	#  certificate verification has been performed.  These fields
-	#  MAY be available during the authentication, or they may be
-	#  available only in the "post-auth" section.
-	#
-	#  The first set of attributes contains information about the
-	#  issuing certificate which is being used.  The second
-	#  contains information about the client certificate (if
-	#  available).
-#
-#	update reply {
-#	       Reply-Message += "%{TLS-Cert-Serial}"
-#	       Reply-Message += "%{TLS-Cert-Expiration}"
-#	       Reply-Message += "%{TLS-Cert-Subject}"
-#	       Reply-Message += "%{TLS-Cert-Issuer}"
-#	       Reply-Message += "%{TLS-Cert-Common-Name}"
-#	       Reply-Message += "%{TLS-Cert-Subject-Alt-Name-Email}"
-#
-#	       Reply-Message += "%{TLS-Client-Cert-Serial}"
-#	       Reply-Message += "%{TLS-Client-Cert-Expiration}"
-#	       Reply-Message += "%{TLS-Client-Cert-Subject}"
-#	       Reply-Message += "%{TLS-Client-Cert-Issuer}"
-#	       Reply-Message += "%{TLS-Client-Cert-Common-Name}"
-#	       Reply-Message += "%{TLS-Client-Cert-Subject-Alt-Name-Email}"
-#	}
-
-	#  Insert class attribute (with unique value) into response,
-	#  aids matching auth and acct records, and protects against duplicate
-	#  Acct-Session-Id. Note: Only works if the NAS has implemented
-	#  RFC 2865 behaviour for the class attribute, AND if the NAS
-	#  supports long Class attributes.  Many older or cheap NASes
-	#  only support 16-octet Class attributes.
-#	insert_acct_class
-
-	#  MacSEC requires the use of EAP-Key-Name.  However, we don't
-	#  want to send it for all EAP sessions.  Therefore, the EAP
-	#  modules put required data into the EAP-Session-Id attribute.
-	#  This attribute is never put into a request or reply packet.
-	#
-	#  Uncomment the next few lines to copy the required data into
-	#  the EAP-Key-Name attribute
-#	if (reply:EAP-Session-Id) {
-#		update reply {
-#			EAP-Key-Name := "%{reply:EAP-Session-Id}"
-#		}
-#	}
-
-	#  Remove reply message if the response contains an EAP-Message
-	remove_reply_message_if_eap
-
-	#
-	#  Access-Reject packets are sent through the REJECT sub-section of the
-	#  post-auth section.
-	#
-	#  Add the ldap module name (or instance) if you have set
-	#  'edir_account_policy_check = yes' in the ldap module configuration
-	#
-	Post-Auth-Type REJECT {
-		# log failed authentications in SQL, too.
-		-sql
-		attr_filter.access_reject
-
-		# Insert EAP-Failure message if the request was
-		# rejected by policy instead of because of an
-		# authentication failure
-		eap
-
-		#  Remove reply message if the response contains an EAP-Message
-		remove_reply_message_if_eap
-	}
-}
-
-#
-#  When the server decides to proxy a request to a home server,
-#  the proxied request is first passed through the pre-proxy
-#  stage.  This stage can re-write the request, or decide to
-#  cancel the proxy.
-#
-#  Only a few modules currently have this method.
-#
-pre-proxy {
-	# Before proxing the request add an Operator-Name attribute identifying
-	# if the operator-name is found for this client.
-	# No need to uncomment this if you have already enabled this in
-	# the authorize section.
-#       operator-name
-
-	#  The client requests the CUI by sending a CUI attribute
-	#  containing one zero byte.
-	#  Uncomment the line below if *requesting* the CUI.
-#       cui
-
-	#  Uncomment the following line if you want to change attributes
-	#  as defined in the preproxy_users file.
-#	files
-
-	#  Uncomment the following line if you want to filter requests
-	#  sent to remote servers based on the rules defined in the
-	#  'attrs.pre-proxy' file.
-#	attr_filter.pre-proxy
-
-	#  If you want to have a log of packets proxied to a home
-	#  server, un-comment the following line, and the
-	#  'detail pre_proxy_log' section, above.
-#	pre_proxy_log
-}
-
-#
-#  When the server receives a reply to a request it proxied
-#  to a home server, the request may be massaged here, in the
-#  post-proxy stage.
-#
-post-proxy {
-
-	#  If you want to have a log of replies from a home server,
-	#  un-comment the following line, and the 'detail post_proxy_log'
-	#  section, above.
-#	post_proxy_log
-
-	#  Uncomment the following line if you want to filter replies from
-	#  remote proxies based on the rules defined in the 'attrs' file.
-#	attr_filter.post-proxy
-
-	#
-	#  If you are proxying LEAP, you MUST configure the EAP
-	#  module, and you MUST list it here, in the post-proxy
-	#  stage.
-	#
-	#  You MUST also use the 'nostrip' option in the 'realm'
-	#  configuration.  Otherwise, the User-Name attribute
-	#  in the proxied request will not match the user name
-	#  hidden inside of the EAP packet, and the end server will
-	#  reject the EAP request.
-	#
-	eap
-
-	#
-	#  If the server tries to proxy a request and fails, then the
-	#  request is processed through the modules in this section.
-	#
-	#  The main use of this section is to permit robust proxying
-	#  of accounting packets.  The server can be configured to
-	#  proxy accounting packets as part of normal processing.
-	#  Then, if the home server goes down, accounting packets can
-	#  be logged to a local "detail" file, for processing with
-	#  radrelay.  When the home server comes back up, radrelay
-	#  will read the detail file, and send the packets to the
-	#  home server.
-	#
-	#  With this configuration, the server always responds to
-	#  Accounting-Requests from the NAS, but only writes
-	#  accounting packets to disk if the home server is down.
-	#
-#	Post-Proxy-Type Fail {
-#			detail
-#	}
-}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dhcp b/src/test/setup/radius-config/freeradius/sites-available/dhcp
deleted file mode 100644
index 42760ef..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/dhcp
+++ /dev/null
@@ -1,279 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a virtual server that handles DHCP.
-#
-#	$Id: 170e2b191af7184b519d3594fa99476c857dfda5 $
-#
-######################################################################
-
-#
-#  The DHCP functionality goes into a virtual server.
-#
-server dhcp {
-
-#  Define a DHCP socket.
-#
-#  The default port below is 6700, so you don't break your network.
-#  If you want it to do real DHCP, change this to 67, and good luck!
-#
-#  You can also bind the DHCP socket to an interface.
-#  See below, and raddb/radiusd.conf for examples.
-#
-#  This lets you run *one* DHCP server instance and have it listen on
-#  multiple interfaces, each with a separate policy.
-#
-#  If you have multiple interfaces, it is a good idea to bind the
-#  listen section to an interface.  You will also need one listen
-#  section per interface.
-#
-#  FreeBSD does *not* support binding sockets to interfaces.  Therefore,
-#  if you have multiple interfaces, broadcasts may go out of the wrong
-#  one, or even all interfaces.  The solution is to use the "setfib" command.
-#  If you have a network "10.10.0/24" on LAN1, you will need to do:
-#
-#  Pick any IP on the 10.10.0/24 network
-#	$ setfib 1 route add default 10.10.0.1
-#
-#  Edit /etc/rc.local, and add a line:
-#	setfib 1 /path/to/radiusd
-#
-#  The kern must be built with the following options:
-#	options    ROUTETABLES=2
-#  or any value larger than 2.
-#
-# The other only solution is to update FreeRADIUS to use BPF sockets.
-#
-listen {
-	#  This is a dhcp socket.
-	type = dhcp
-
-	#  IP address to listen on. Will usually be the IP of the
-	#  interface, or 0.0.0.0
-	ipaddr = 127.0.0.1
-
-	#  source IP address for unicast packets sent by the
-	#  DHCP server.
-	#
-	#  The source IP for unicast packets is chosen from the first
-	#  one of the following items which returns a valid IP
-	#  address:
-	#
-	#	src_ipaddr
-	#	ipaddr
-	#	reply:DHCP-Server-IP-Address
-	#	reply:DHCP-DHCP-Server-Identifier
-	#
-	src_ipaddr = 127.0.0.1
-
-	#  The port should be 67 for a production network. Don't set
-	#  it to 67 on a production network unless you really know
-	#  what you're doing. Even if nothing is configured below, the
-	#  server may still NAK legitimate responses from clients.
-	port = 6700
-
-	#  Interface name we are listening on. See comments above.
-#	interface = lo0
-
-	# The DHCP server defaults to allowing broadcast packets.
-	# Set this to "no" only when the server receives *all* packets
-	# from a relay agent.  i.e. when *no* clients are on the same
-	# LAN as the DHCP server.
-	#
-	# It's set to "no" here for testing. It will usually want to
-	# be "yes" in production, unless you are only dealing with
-	# relayed packets.
-	broadcast = no
-
-	# On Linux if you're running the server as non-root, you
-	# will need to do:
-	#
-	#	sudo setcap cap_net_admin=ei /path/to/radiusd
-	#
-	# This will allow the server to set ARP table entries
-	# for newly allocated IPs
-}
-
-#  Packets received on the socket will be processed through one
-#  of the following sections, named after the DHCP packet type.
-#  See dictionary.dhcp for the packet types.
-
-#  Return packets will be sent to, in preference order:
-#     DHCP-Gateway-IP-Address
-#     DHCP-Client-IP-Address
-#     DHCP-Your-IP-Address
-#  At least one of these attributes should be set at the end of each
-#  section for a response to be sent.
-
-dhcp DHCP-Discover {
-
-	#  Set the type of packet to send in reply.
-	#
-	#  The server will look at the DHCP-Message-Type attribute to
-	#  determine which type of packet to send in reply. Common
-	#  values would be DHCP-Offer, DHCP-Ack or DHCP-NAK. See
-	#  dictionary.dhcp for all the possible values.
-	#
-	#  DHCP-Do-Not-Respond can be used to tell the server to not
-	#  respond.
-	#
-	#  In the event that DHCP-Message-Type is not set then the
-	#  server will fall back to determining the type of reply
-	#  based on the rcode of this section.
-
-	update reply {
-	       DHCP-Message-Type = DHCP-Offer
-	}
-
-	#  The contents here are invented.  Change them!
-	update reply {
-		DHCP-Domain-Name-Server = 127.0.0.1
-		DHCP-Domain-Name-Server = 127.0.0.2
-		DHCP-Subnet-Mask = 255.255.255.0
-		DHCP-Router-Address = 192.0.2.1
-		DHCP-IP-Address-Lease-Time = 86400
-		DHCP-DHCP-Server-Identifier = 192.0.2.1
-	}
-
-	#  Do a simple mapping of MAC to assigned IP.
-	#
-	#  See below for the definition of the "mac2ip"
-	#  module.
-	#
-	#mac2ip
-
-	#  If the MAC wasn't found in that list, do something else.
-	#  You could call a Perl, Python, or Java script here.
-
-	#if (notfound) {
-	# ...
-	#}
-
-	#  Or, allocate IPs from the DHCP pool in SQL. You may need to
-	#  set the pool name here if you haven't set it elsewhere.
-#	update control {
-#		Pool-Name := "local"
-#	}
-#	dhcp_sqlippool
-
-	#  If DHCP-Message-Type is not set, returning "ok" or
-	#  "updated" from this section will respond with a DHCP-Offer
-	#  message.
-	#
-	#  Other rcodes will tell the server to not return any response.
-	ok
-}
-
-dhcp DHCP-Request {
-
-	# Response packet type. See DHCP-Discover section above.
-	update reply {
-	       DHCP-Message-Type = DHCP-Ack
-	}
-
-	#  The contents here are invented.  Change them!
-	update reply {
-		DHCP-Domain-Name-Server = 127.0.0.1
-		DHCP-Domain-Name-Server = 127.0.0.2
-		DHCP-Subnet-Mask = 255.255.255.0
-		DHCP-Router-Address = 192.0.2.1
-		DHCP-IP-Address-Lease-Time = 86400
-		DHCP-DHCP-Server-Identifier = 192.0.2.1
-	}
-
-	#  Do a simple mapping of MAC to assigned IP.
-	#
-	#  See below for the definition of the "mac2ip"
-	#  module.
-	#
-	#mac2ip
-
-	#  If the MAC wasn't found in that list, do something else.
-	#  You could call a Perl, Python, or Java script here.
-
-	#if (notfound) {
-	# ...
-	#}
-
-	#  Or, allocate IPs from the DHCP pool in SQL. You may need to
-	#  set the pool name here if you haven't set it elsewhere.
-#	update control {
-#		Pool-Name := "local"
-#	}
-#	dhcp_sqlippool
-
-	#  If DHCP-Message-Type is not set, returning "ok" or
-	#  "updated" from this section will respond with a DHCP-Ack
-	#  packet.
-	#
-	#  "handled" will not return a packet, all other rcodes will
-	#  send back a DHCP-NAK.
-	ok
-}
-
-#
-#  Other DHCP packet types
-#
-#  There should be a separate section for each DHCP message type.
-#  By default this configuration will ignore them all. Any packet type
-#  not defined here will be responded to with a DHCP-NAK.
-
-dhcp DHCP-Decline {
-	update reply {
-	       DHCP-Message-Type = DHCP-Do-Not-Respond
-	}
-	reject
-}
-
-dhcp DHCP-Inform {
-	update reply {
-	       DHCP-Message-Type = DHCP-Do-Not-Respond
-	}
-	reject
-}
-
-dhcp DHCP-Release {
-	update reply {
-	       DHCP-Message-Type = DHCP-Do-Not-Respond
-	}
-	reject
-}
-
-
-}
-
-######################################################################
-#
-#  This next section is a sample configuration for the "passwd"
-#  module, that reads flat-text files.  It should go into
-#  radiusd.conf, in the "modules" section.
-#
-#  The file is in the format <mac>,<ip>
-#
-#	00:01:02:03:04:05,192.0.2.100
-#	01:01:02:03:04:05,192.0.2.101
-#	02:01:02:03:04:05,192.0.2.102
-#
-#  This lets you perform simple static IP assignment.
-#
-#  There is a preconfigured "mac2ip" module setup in
-#  mods-available/mac2ip. To use it do:
-#
-#    # cd raddb/
-#    # ln -s ../mods-available/mac2ip mods-enabled/mac2ip
-#    # mkdir mods-config/passwd
-#
-#  Then create the file mods-config/passwd/mac2ip with the above
-#  format.
-#
-######################################################################
-
-
-#  This is an example only - see mods-available/mac2ip instead; do
-#  not uncomment these lines here.
-#
-#passwd mac2ip {
-#	filename = ${confdir}/mac2ip
-#	format = "*DHCP-Client-Hardware-Address:=DHCP-Your-IP-Address"
-#	delimiter = ","
-#}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay b/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay
deleted file mode 100644
index 737cc5d..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a virtual server that handles DHCP relaying
-#
-#	Only one server can listen on a socket, so you cannot
-#	do DHCP relaying && run a DHCP server at the same time.
-#
-######################################################################
-
-server dhcp.eth1 {
-	listen {
-		ipaddr = *
-		port = 67
-		type = dhcp
-		interface = eth1
-	}
-
-	#  Packets received on the socket will be processed through one
-	#  of the following sections, named after the DHCP packet type.
-	#  See dictionary.dhcp for the packet types.
-	dhcp DHCP-Discover {
-		update config {
-			# IP Address of the DHCP server
-			DHCP-Relay-To-IP-Address := 192.0.2.2
-		}
-		update request {
-			# IP Address of the DHCP relay (ourselves)
-			DHCP-Gateway-IP-Address := 192.0.2.1
-		}
-		ok
-	}
-
-	dhcp DHCP-Request {
-		update config {
-			# IP Address of the DHCP server
-			DHCP-Relay-To-IP-Address := 192.0.2.2
-		}
-		update request {
-			DHCP-Gateway-IP-Address := 192.0.2.2
-		}
-		ok
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients b/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients
deleted file mode 100644
index 8f5edde..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients
+++ /dev/null
@@ -1,224 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	Sample configuration file for dynamically updating the list
-#	of RADIUS clients at run time.
-#
-#	Everything is keyed off of a client "network".  (e.g. 192.0.2/24)
-#	This configuration lets the server know that clients within
-#	that network are defined dynamically.
-#
-#	When the server receives a packet from an unknown IP address
-#	within that network, it tries to find a dynamic definition
-#	for that client.  If the definition is found, the IP address
-#	(and other configuration) is added to the server's internal
-#	cache of "known clients", with a configurable lifetime.
-#
-#	Further packets from that IP address result in the client
-#	definition being found in the cache.  Once the lifetime is
-#	reached, the client definition is deleted, and any new requests
-#	from that client are looked up as above.
-#
-#	If the dynamic definition is not found, then the request is
-#	treated as if it came from an unknown client.  i.e. It is
-#	silently discarded.
-#
-#	As part of protection from Denial of Service (DoS) attacks,
-#	the server will add only one new client per second.  This CANNOT
-#	be changed, and is NOT configurable.
-#
-#	$Id: cdfa6175a9617bcd081b0b69f2c9340c3adaa56e $
-#
-######################################################################
-
-#
-#  Define a network where clients may be dynamically defined.
-client dynamic {
-	ipaddr = 192.0.2.0
-
-	#
-	#  You MUST specify a netmask!
-	#  IPv4 /32 or IPv6 /128 are NOT allowed!
-	netmask = 24
-
-	#
-	#  Any other configuration normally found in a "client"
-	#  entry can be used here.
-
-	#
-	#  A shared secret does NOT have to be defined.  It can
-	#  be left out.
-
-	#
-	#  Define the virtual server used to discover dynamic clients.
-	dynamic_clients = dynamic_clients
-
-	#
-	#  The directory where client definitions are stored.  This
-	#  needs to be used ONLY if the client definitions are stored
-	#  in flat-text files.  Each file in that directory should be
-	#  ONE and only one client definition.  The name of the file
-	#  should be the IP address of the client.
-	#
-	#  If you are storing clients in SQL, this entry should not
-	#  be used.
-#	directory = ${confdir}/dynamic-clients/
-
-	#
-	#  Define the lifetime (in seconds) for dynamic clients.
-	#  They will be cached for this lifetime, and deleted afterwards.
-	#
-	#  If the lifetime is "0", then the dynamic client is never
-	#  deleted.  The only way to delete the client is to re-start
-	#  the server.
-	lifetime = 3600
-}
-
-#
-#  This is the virtual server referenced above by "dynamic_clients".
-server dynamic_clients {
-
-	#
-	#  The only contents of the virtual server is the "authorize" section.
-	authorize {
-
-		#
-		#  Put any modules you want here.  SQL, LDAP, "exec",
-		#  Perl, etc.  The only requirements is that the
-		#  attributes MUST go into the control item list.
-		#
-		#  The request that is processed through this section
-		#  is EMPTY.  There are NO attributes.  The request is fake,
-		#  and is NOT the packet that triggered the lookup of
-		#  the dynamic client.
-		#
-		#  The ONLY piece of useful information is either
-		#
-		#	Packet-Src-IP-Address (IPv4 clients)
-		#	Packet-Src-IPv6-Address (IPv6 clients)
-		#
-		#  The attributes used to define a dynamic client mirror
-		#  the configuration items in the "client" structure.
-		#
-
-		#
-		#  Example 1: Hard-code a client IP.  This example is
-		#             useless, but it documents the attributes
-		#             you need.
-		#
-		update control {
-
-			#
-			#  Echo the IP address of the client.
-			FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
-
-			# require_message_authenticator
-			FreeRADIUS-Client-Require-MA = no
-
-			# secret
-			FreeRADIUS-Client-Secret = "testing123"
-
-			# shortname
-			FreeRADIUS-Client-Shortname = "%{Packet-Src-IP-Address}"
-
-			# nas_type
-			FreeRADIUS-Client-NAS-Type = "other"
-
-			# virtual_server
-			#
-			#  This can ONLY be used if the network client
-			#  definition (e.g. "client dynamic" above) has
-			#  NO virtual_server defined.
-			#
-			#  If the network client definition does have a
-			#  virtual_server defined, then that is used,
-			#  and there is no need to define this attribute.
-			#
-			FreeRADIUS-Client-Virtual-Server = "something"
-
-		}
-
-		#
-		#  Example 2: Read the clients from "clients" files
-		#             in a directory.
-		#
-
-		#             This requires you to uncomment the
-		#             "directory" configuration in the
-		#             "client dynamic" configuration above,
-		#	      and then put one file per IP address in
-		#             that directory.
-		#
-		dynamic_clients
-
-		#
-		#  Example 3: Look the clients up in SQL.
-		#
-		#  This requires the SQL module to be configured, of course.
-		if ("%{sql: SELECT nasname FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}") {
-			update control {
-				#
-				#  Echo the IP.
-				FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
-
-				#
-				#  Do multiple SELECT statements to grab
-				#  the various definitions.
-				FreeRADIUS-Client-Shortname = "%{sql: SELECT shortname FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
-
-				FreeRADIUS-Client-Secret = "%{sql: SELECT secret FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
-
-				FreeRADIUS-Client-NAS-Type = "%{sql: SELECT type FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
-
-				FreeRADIUS-Client-Virtual-Server = "%{sql: SELECT server FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
-			}
-
-		}
-
-		# Do an LDAP lookup in the elements OU, check to see if
-		# the Packet-Src-IP-Address object has a "ou"
-		# attribute, if it does continue.  Change "ACME.COM" to
-		# the real OU of your organization.
-		#
-		# Assuming the following schema:
-		#
-		# OU=Elements,OU=Radius,DC=ACME,DC=COM
-		#
-		# Elements will hold a record of every NAS in your
-		# Network.  Create Group objects based on the IP
-		# Address of the NAS and set the "Location" or "l"
-		# attribute to the NAS Huntgroup the NAS belongs to
-		# allow them to be centrally managed in LDAP.
-		#
-		# e.g.  CN=10.1.2.3,OU=Elements,OU=Radius,DC=ACME,DC=COM
-		#
-		# With a "l" value of "CiscoRTR" for a Cisco Router
-		# that has a NAS-IP-Address or Source-IP-Address of
-		# 10.1.2.3.
-		#
-		# And with a "ou" value of the shared secret password
-		# for the NAS element. ie "password"
-		if ("%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?ou?sub?cn=%{Packet-Src-IP-Address}}") {
-			update control {
-			       FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
-
-				# Set the Client-Shortname to be the Location
-				# "l" just like in the Huntgroups, but this
-				# time to the shortname.
-
-				FreeRADIUS-Client-Shortname = "%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?l?sub?cn=%{Packet-Src-IP-Address}}"
-
-				# Lookup and set the Shared Secret based on
-				# the "ou" attribute.
-				FreeRADIUS-Client-Secret = "%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?ou?sub?cn=%{Packet-Src-IP-Address}}"
-			}
-		}
-
-		#
-		#  Tell the caller that the client was defined properly.
-		#
-		#  If the authorize section does NOT return "ok", then
-		#  the new client is ignored.
-		ok
-	}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/example b/src/test/setup/radius-config/freeradius/sites-available/example
deleted file mode 100644
index 05522ea..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/example
+++ /dev/null
@@ -1,122 +0,0 @@
-######################################################################
-#
-#	An example virtual server configuration.
-#
-#	$Id: e58e24319d6320a0a0d56fd649d937bf95156739 $
-#
-######################################################################
-
-
-#
-#	This client will be available to any "listen" section that
-#	are defined outside of a virtual server section.  However,
-#	when the server receives a packet from this client, the
-#	request will be processed through the "example" virtual
-#	server, as the "client" section contains a configuration item
-#	to that effect.
-#
-#	Note that this client will be able to send requests to any
-#	port defined in a global "listen" section.  It will NOT,
-#	however, be able to send requests to a port defined in a
-#	"listen" section that is contained in a "server" section.
-#
-#	With careful matching of configurations, you should be able
-#	to:
-#
-#	- Define one authentication port, but process each client
-#	  through a separate virtual server.
-#
-#	- define multiple authentication ports, each with a private
-#	  list of clients.
-#
-#	- define multiple authentication ports, each of which may
-#	  have the same client listed, but with different shared
-#	  secrets
-#
-#	FYI: We use an address in the 192.0.2.* space for this example,
-#	as RFC 3330 says that that /24 range is used for documentation
-#	and examples, and should not appear on the net.  You shouldn't
-#	use it for anything, either.
-#
-client 192.0.2.10 {
-	shortname	= example-client
-	secret		= testing123
-	virtual_server  = example
-}
-
-######################################################################
-#
-#	An example virtual server.  It starts off with "server name {"
-#	The "name" is used to reference this server from a "listen"
-#	or "client" section.
-#
-######################################################################
-server example {
-	#
-	#	Listen on 192.0.2.1:1812 for Access-Requests
-	#
-	#	When the server receives a packet, it is processed
-	#	through the "authorize", etc. sections listed here,
-	#	NOT the global ones the "default" site.
-	#
-	listen {
-		ipaddr = 192.0.2.1
-		port = 1821
-		type = auth
-	}
-
-	#
-	#	This client is listed within the "server" section,
-	#	and is therefore known ONLY to the socket defined
-	#	in the "listen" section above.  If the client IP
-	#	sends a request to a different socket, the server
-	#	will treat it as an unknown client, and will not
-	#	respond.
-	#
-	#	In contrast, the client listed at the top of this file
-	#	is outside of any "server" section, and is therefore
-	#	global in scope.  It can send packets to any port
-	#	defined in a global "listen" section.  It CANNOT send
-	#	packets to the listen section defined above, though.
-	#
-	#	Note that you don't have to have a "virtual_server = example"
-	#	line here, as the client is encapsulated within
-	#	the "server" section.
-	#
-	client 192.0.2.9 {
-		shortname	= example-client
-		secret		= testing123
-	}
-
-	authorize {
-		#
-		#  Some example policies.  See "man unlang" for more.
-		#
-		if ("%{User-Name}" == "bob") {
-			update control {
-				Cleartext-Password := "bob"
-			}
-		}
-
-		#
-		#  And then reject the user.  The next line requires
-		#  that the "always reject {}" section is defined in
-		#  the "modules" section of radiusd.conf.
-		#
-		reject
-	}
-
-	authenticate {
-
-	}
-
-	post-auth {
-
-		Post-Auth-Type Reject {
-			update reply {
-				Reply-Message = "This is only an example."
-			}
-		}
-	}
-
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel b/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel
deleted file mode 100644
index dc7b7de..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel
+++ /dev/null
@@ -1,408 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a virtual server that handles *only* inner tunnel
-#	requests for EAP-TTLS and PEAP types.
-#
-#	$Id: 11b6c12d845a1e8287888b3f0a0748d810b2c184 $
-#
-######################################################################
-
-server inner-tunnel {
-
-#
-#  This next section is here to allow testing of the "inner-tunnel"
-#  authentication methods, independently from the "default" server.
-#  It is listening on "localhost", so that it can only be used from
-#  the same machine.
-#
-#	$ radtest USER PASSWORD 127.0.0.1:18120 0 testing123
-#
-#  If it works, you have configured the inner tunnel correctly.  To check
-#  if PEAP will work, use:
-#
-#	$ radtest -t mschap USER PASSWORD 127.0.0.1:18120 0 testing123
-#
-#  If that works, PEAP should work.  If that command doesn't work, then
-#
-#	FIX THE INNER TUNNEL CONFIGURATION SO THAT IT WORKS.
-#
-#  Do NOT do any PEAP tests.  It won't help.  Instead, concentrate
-#  on fixing the inner tunnel configuration.  DO NOTHING ELSE.
-#
-listen {
-       ipaddr = 127.0.0.1
-       port = 18120
-       type = auth
-}
-
-
-#  Authorization. First preprocess (hints and huntgroups files),
-#  then realms, and finally look in the "users" file.
-#
-#  The order of the realm modules will determine the order that
-#  we try to find a matching realm.
-#
-#  Make *sure* that 'preprocess' comes before any realm if you
-#  need to setup hints for the remote radius server
-authorize {
-	#
-	#  The chap module will set 'Auth-Type := CHAP' if we are
-	#  handling a CHAP request and Auth-Type has not already been set
-	chap
-
-	#
-	#  If the users are logging in with an MS-CHAP-Challenge
-	#  attribute for authentication, the mschap module will find
-	#  the MS-CHAP-Challenge attribute, and add 'Auth-Type := MS-CHAP'
-	#  to the request, which will cause the server to then use
-	#  the mschap module for authentication.
-	mschap
-
-	#
-	#  Pull crypt'd passwords from /etc/passwd or /etc/shadow,
-	#  using the system API's to get the password.  If you want
-	#  to read /etc/passwd or /etc/shadow directly, see the
-	#  passwd module, above.
-	#
-#	unix
-
-	#
-	#  Look for IPASS style 'realm/', and if not found, look for
-	#  '@realm', and decide whether or not to proxy, based on
-	#  that.
-#	IPASS
-
-	#
-	#  If you are using multiple kinds of realms, you probably
-	#  want to set "ignore_null = yes" for all of them.
-	#  Otherwise, when the first style of realm doesn't match,
-	#  the other styles won't be checked.
-	#
-	#  Note that proxying the inner tunnel authentication means
-	#  that the user MAY use one identity in the outer session
-	#  (e.g. "anonymous", and a different one here
-	#  (e.g. "user@example.com").  The inner session will then be
-	#  proxied elsewhere for authentication.  If you are not
-	#  careful, this means that the user can cause you to forward
-	#  the authentication to another RADIUS server, and have the
-	#  accounting logs *not* sent to the other server.  This makes
-	#  it difficult to bill people for their network activity.
-	#
-	suffix
-#	ntdomain
-
-	#
-	#  The "suffix" module takes care of stripping the domain
-	#  (e.g. "@example.com") from the User-Name attribute, and the
-	#  next few lines ensure that the request is not proxied.
-	#
-	#  If you want the inner tunnel request to be proxied, delete
-	#  the next few lines.
-	#
-	update control {
-	       Proxy-To-Realm := LOCAL
-	}
-
-	#
-	#  This module takes care of EAP-MSCHAPv2 authentication.
-	#
-	#  It also sets the EAP-Type attribute in the request
-	#  attribute list to the EAP type from the packet.
-	#
-	#  The example below uses module failover to avoid querying all
-	#  of the following modules if the EAP module returns "ok".
-	#  Therefore, your LDAP and/or SQL servers will not be queried
-	#  for the many packets that go back and forth to set up TTLS
-	#  or PEAP.  The load on those servers will therefore be reduced.
-	#
-	eap {
-		ok = return
-	}
-
-	#
-	#  Read the 'users' file
-	files
-
-	#
-	#  Look in an SQL database.  The schema of the database
-	#  is meant to mirror the "users" file.
-	#
-	#  See "Authorization Queries" in sql.conf
-	-sql
-
-	#
-	#  If you are using /etc/smbpasswd, and are also doing
-	#  mschap authentication, the un-comment this line, and
-	#  configure the 'etc_smbpasswd' module, above.
-#	etc_smbpasswd
-
-	#
-	#  The ldap module reads passwords from the LDAP database.
-	-ldap
-
-	#
-	#  Enforce daily limits on time spent logged in.
-#	daily
-
-	expiration
-	logintime
-
-	#
-	#  If no other module has claimed responsibility for
-	#  authentication, then try to use PAP.  This allows the
-	#  other modules listed above to add a "known good" password
-	#  to the request, and to do nothing else.  The PAP module
-	#  will then see that password, and use it to do PAP
-	#  authentication.
-	#
-	#  This module should be listed last, so that the other modules
-	#  get a chance to set Auth-Type for themselves.
-	#
-	pap
-}
-
-
-#  Authentication.
-#
-#
-#  This section lists which modules are available for authentication.
-#  Note that it does NOT mean 'try each module in order'.  It means
-#  that a module from the 'authorize' section adds a configuration
-#  attribute 'Auth-Type := FOO'.  That authentication type is then
-#  used to pick the appropriate module from the list below.
-#
-
-#  In general, you SHOULD NOT set the Auth-Type attribute.  The server
-#  will figure it out on its own, and will do the right thing.  The
-#  most common side effect of erroneously setting the Auth-Type
-#  attribute is that one authentication method will work, but the
-#  others will not.
-#
-#  The common reasons to set the Auth-Type attribute by hand
-#  is to either forcibly reject the user, or forcibly accept him.
-#
-authenticate {
-	#
-	#  PAP authentication, when a back-end database listed
-	#  in the 'authorize' section supplies a password.  The
-	#  password can be clear-text, or encrypted.
-	Auth-Type PAP {
-		pap
-	}
-
-	#
-	#  Most people want CHAP authentication
-	#  A back-end database listed in the 'authorize' section
-	#  MUST supply a CLEAR TEXT password.  Encrypted passwords
-	#  won't work.
-	Auth-Type CHAP {
-		chap
-	}
-
-	#
-	#  MSCHAP authentication.
-	Auth-Type MS-CHAP {
-		mschap
-	}
-
-	#
-	#  Pluggable Authentication Modules.
-#	pam
-
-	# Uncomment it if you want to use ldap for authentication
-	#
-	# Note that this means "check plain-text password against
-	# the ldap database", which means that EAP won't work,
-	# as it does not supply a plain-text password.
-	#
-	#  We do NOT recommend using this.  LDAP servers are databases.
-	#  They are NOT authentication servers.  FreeRADIUS is an
-	#  authentication server, and knows what to do with authentication.
-	#  LDAP servers do not.
-	#
-#	Auth-Type LDAP {
-#		ldap
-#	}
-
-	#
-	#  Allow EAP authentication.
-	eap
-}
-
-######################################################################
-#
-#	There are no accounting requests inside of EAP-TTLS or PEAP
-#	tunnels.
-#
-######################################################################
-
-
-#  Session database, used for checking Simultaneous-Use. Either the radutmp
-#  or rlm_sql module can handle this.
-#  The rlm_sql module is *much* faster
-session {
-	radutmp
-
-	#
-	#  See "Simultaneous Use Checking Queries" in sql.conf
-#	sql
-}
-
-
-#  Post-Authentication
-#  Once we KNOW that the user has been authenticated, there are
-#  additional steps we can take.
-post-auth {
-	#  If you want privacy to remain, see the
-	#  Chargeable-User-Identity attribute from RFC 4372.
-	#  If you want to use it just uncomment the line below.
-#       cui-inner
-
-	#
-	#  If you want to have a log of authentication replies,
-	#  un-comment the following line, and enable the
-	#  'detail reply_log' module.
-#	reply_log
-
-	#
-	#  After authenticating the user, do another SQL query.
-	#
-	#  See "Authentication Logging Queries" in sql.conf
-	-sql
-
-	#
-	#  Instead of sending the query to the SQL server,
-	#  write it into a log file.
-	#
-#	sql_log
-
-	#
-	#  Un-comment the following if you have set
-	#  'edir_account_policy_check = yes' in the ldap module sub-section of
-	#  the 'modules' section.
-	#
-#	ldap
-
-	#
-	#  Access-Reject packets are sent through the REJECT sub-section of the
-	#  post-auth section.
-	#
-	#  Add the ldap module name (or instance) if you have set
-	#  'edir_account_policy_check = yes' in the ldap module configuration
-	#
-	Post-Auth-Type REJECT {
-		# log failed authentications in SQL, too.
-		-sql
-		attr_filter.access_reject
-	}
-
-	#
-	#  The example policy below updates the outer tunnel reply
-	#  (usually Access-Accept) with the User-Name from the inner
-	#  tunnel User-Name.  Since this section is processed in the
-	#  context of the inner tunnel, "request" here means "inner
-	#  tunnel request", and "outer.reply" means "outer tunnel
-	#  reply attributes".
-	#
-	#  This example is most useful when the outer session contains
-	#  a User-Name of "anonymous@....", or a MAC address.  If it
-	#  is enabled, the NAS SHOULD use the inner tunnel User-Name
-	#  in subsequent accounting packets.  This makes it easier to
-	#  track user sessions, as they will all be based on the real
-	#  name, and not on "anonymous".
-	#
-	#  The problem with doing this is that it ALSO exposes the
-	#  real user name to any intermediate proxies.  People use
-	#  "anonymous" identifiers outside of the tunnel for a very
-	#  good reason: it gives them more privacy.  Setting the reply
-	#  to contain the real user name removes ALL privacy from
-	#  their session.
-	#
-	#  If you still want to use the inner tunnel User-Name then
-	#  uncomment the section below, otherwise you may want
-	#  to use  Chargeable-User-Identity attribute from RFC 4372.
-	#  See further on.
-	#update outer.reply {
-	#  User-Name = "%{request:User-Name}"
-	#}
-	#
-}
-
-#
-#  When the server decides to proxy a request to a home server,
-#  the proxied request is first passed through the pre-proxy
-#  stage.  This stage can re-write the request, or decide to
-#  cancel the proxy.
-#
-#  Only a few modules currently have this method.
-#
-pre-proxy {
-	#  Uncomment the following line if you want to change attributes
-	#  as defined in the preproxy_users file.
-#	files
-
-	#  Uncomment the following line if you want to filter requests
-	#  sent to remote servers based on the rules defined in the
-	#  'attrs.pre-proxy' file.
-#	attr_filter.pre-proxy
-
-	#  If you want to have a log of packets proxied to a home
-	#  server, un-comment the following line, and the
-	#  'detail pre_proxy_log' section, above.
-#	pre_proxy_log
-}
-
-#
-#  When the server receives a reply to a request it proxied
-#  to a home server, the request may be massaged here, in the
-#  post-proxy stage.
-#
-post-proxy {
-
-	#  If you want to have a log of replies from a home server,
-	#  un-comment the following line, and the 'detail post_proxy_log'
-	#  section, above.
-#	post_proxy_log
-
-	#  Uncomment the following line if you want to filter replies from
-	#  remote proxies based on the rules defined in the 'attrs' file.
-#	attr_filter.post-proxy
-
-	#
-	#  If you are proxying LEAP, you MUST configure the EAP
-	#  module, and you MUST list it here, in the post-proxy
-	#  stage.
-	#
-	#  You MUST also use the 'nostrip' option in the 'realm'
-	#  configuration.  Otherwise, the User-Name attribute
-	#  in the proxied request will not match the user name
-	#  hidden inside of the EAP packet, and the end server will
-	#  reject the EAP request.
-	#
-	eap
-
-	#
-	#  If the server tries to proxy a request and fails, then the
-	#  request is processed through the modules in this section.
-	#
-	#  The main use of this section is to permit robust proxying
-	#  of accounting packets.  The server can be configured to
-	#  proxy accounting packets as part of normal processing.
-	#  Then, if the home server goes down, accounting packets can
-	#  be logged to a local "detail" file, for processing with
-	#  radrelay.  When the home server comes back up, radrelay
-	#  will read the detail file, and send the packets to the
-	#  home server.
-	#
-	#  With this configuration, the server always responds to
-	#  Accounting-Requests from the NAS, but only writes
-	#  accounting packets to disk if the home server is down.
-	#
-#	Post-Proxy-Type Fail {
-#			detail
-#	}
-
-}
-
-} # inner-tunnel server block
diff --git a/src/test/setup/radius-config/freeradius/sites-available/originate-coa b/src/test/setup/radius-config/freeradius/sites-available/originate-coa
deleted file mode 100644
index 79e2f1d..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/originate-coa
+++ /dev/null
@@ -1,190 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#  The server can originate Change of Authorization (CoA) or
-#  Disconnect request packets.  These packets are used to dynamically
-#  change the parameters of a users session (bandwidth, etc.), or
-#  to forcibly disconnect the user.
-#
-#  There are some caveats.  Not all NAS vendors support this
-#  functionality.  Even for the ones that do, it may be difficult to
-#  find out what needs to go into a CoA-Request or Disconnect-Request
-#  packet.  All we can suggest is to read the NAS documentation
-#  available from the vendor.  That documentation SHOULD describe
-#  what information their equipment needs to see in a CoA packet.
-#
-#  This information is usually a list of attributes such as:
-#
-#	NAS-IP-Address (or NAS-IPv6 address)
-#	NAS-Identifier
-#	User-Name
-#	Acct-Session-Id
-#
-#  CoA packets can be originated when a normal Access-Request or
-#  Accounting-Request packet is received.  Simply update the
-#  "coa" list:
-#
-#	update coa {
-#	       User-Name = "%{User-Name}"
-#	       Acct-Session-Id = "%{Acct-Session-Id}"
-#	       NAS-IP-Address = "%{NAS-IP-Address}"
-#	}
-#
-#  And the CoA packet will be sent.  You can also send Disconnect
-#  packets by using "update disconnect { ...".
-#
-#  This "update coa" entry can be placed in any section (authorize,
-#  preacct, etc.), EXCEPT for pre-proxy and post-proxy.  The CoA
-#  packets CANNOT be sent if the original request has been proxied.
-#
-#  The CoA functionality works best when the RADIUS server and
-#  the NAS receiving CoA packets are on the same network.
-#
-#  If "update coa { ... " is used, and then later it becomes necessary
-#  to not send a CoA request, the following example can suppress the
-#  CoA packet:
-#
-#	update control {
-#		Send-CoA-Request = No
-#	}
-#
-#  The default destination of a CoA packet is the NAS (or client)
-#  the sent the original Access-Request or Accounting-Request.  See
-#  raddb/clients.conf for a "coa_server" configuration that ties
-#  a client to a specific home server, or to a home server pool.
-#
-#  If you need to send the packet to a different destination, update
-#  the "coa" list with one of:
-#
-#	Packet-Dst-IP-Address = ...
-#	Packet-Dst-IPv6-Address = ...
-#	Home-Server-Pool = ...
-#
-#  That specifies an Ipv4 or IPv6 address, or a home server pool
-#  (such as the "coa" pool example below).  This use is not
-#  recommended, however,  It is much better to point the client
-#  configuration directly at the CoA server/pool, as outlined
-#  earlier.
-#
-#  If the CoA port is non-standard, you can also set:
-#
-#	Packet-Dst-Port
-#
-#  to have the value of the port.
-#
-######################################################################
-
-#
-#  When CoA packets are sent to a NAS, the NAS is acting as a
-#  server (see RFC 5176).  i.e. it has a type (accepts CoA and/or
-#  Disconnect packets), an IP address (or IPv6 address), a
-#  destination port, and a shared secret.
-#
-#  This information *cannot* go into a "client" section.  In the future,
-#  FreeRADIUS will be able to receive, and to proxy CoA packets.
-#  Having the CoA configuration as below means that we can later do
-#  load-balancing, fail-over, etc. of CoA servers.  If the CoA
-#  configuration went into a "client" section, it would be impossible
-#  to do proper proxying of CoA requests.
-#
-home_server localhost-coa {
-	type = coa
-
-	#
-	#  Note that a home server of type "coa" MUST be a real NAS,
-	#  with an ipaddr or ipv6addr.  It CANNOT point to a virtual
-	#  server.
-	#
-	ipaddr = 127.0.0.1
-	port = 3799
-
-	#  This secret SHOULD NOT be the same as the shared
-	#  secret in a "client" section.
-	secret = testing1234
-
-	#  CoA specific parameters.  See raddb/proxy.conf for details.
-	coa {
-		irt = 2
-		mrt = 16
-		mrc = 5
-		mrd = 30
-	}
-}
-
-#
-#  CoA servers can be put into pools, just like normal servers.
-#
-home_server_pool coa {
-	type = fail-over
-
-	# Point to the CoA server above.
-	home_server = localhost-coa
-
-	#  CoA requests are run through the pre-proxy section.
-	#  CoA responses are run through the post-proxy section.
-	virtual_server = originate-coa.example.com
-
-	#
-	#  Home server pools of type "coa" cannot (currently) have
-	#  a "fallback" configuration.
-	#
-}
-
-#
-#  When this virtual server is run, the original request has FINISHED
-#  processing.  i.e. the reply has already been sent to the NAS.
-#  You can access the attributes in the original packet, reply, and
-#  control items, but changing them will have NO EFFECT.
-#
-#  The CoA packet is in the "proxy-request" attribute list.
-#  The CoA reply (if any) is in the "proxy-reply" attribute list.
-#
-server originate-coa.example.com {
-  pre-proxy {
-	update proxy-request {
-		NAS-IP-Address = 127.0.0.1
-	}
-  }
-
-  #
-  # Handle the responses here.
-  #
-  post-proxy {
-	switch "%{proxy-reply:Packet-Type}" {
-		case CoA-ACK {
-			ok
-		}
-
-		case CoA-NAK {
-			# the NAS didn't like the CoA request
-			ok
-		}
-
-		case Disconnect-ACK {
-			ok
-		}
-
-		case Disconnect-NAK {
-			# the NAS didn't like the Disconnect request
-			ok
-		}
-
-		# Invalid packet type.  This shouldn't happen.
-		case {
-		     fail
-		}
-	}
-
-	#
-	#  These methods are run when there is NO response
-	#  to the request.
-	#
-	Post-Proxy-Type Fail-CoA {
-		ok
-	}
-
-	Post-Proxy-Type Fail-Disconnect {
-		ok
-	}
-  }
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel b/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel
deleted file mode 100644
index 1ce4137..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a virtual server that handles *only* inner tunnel
-#	requests for EAP-TTLS and PEAP types.
-#
-#	$Id$
-#
-######################################################################
-
-server proxy-inner-tunnel {
-
-#
-#  This example is very simple.  All inner tunnel requests get
-#  proxied to another RADIUS server.
-#
-authorize {
-	#
-	#  Do other things here, as necessary.
-	#
-	#  e.g. run the "realms" module, to decide how to proxy
-	#  the inner tunnel request.
-	#
-
-	update control {
-		#  You should update this to be one of your realms.
-		Proxy-To-Realm := "example.com"
-	}
-}
-
-authenticate {
-	#
-	#  This is necessary so that the inner tunnel EAP-MSCHAPv2
-	#  method can be called.  That method takes care of turning
-	#  EAP-MSCHAPv2 into plain MS-CHAPv2, if necessary.
-	eap
-}
-
-post-proxy {
-	#
-	#  This is necessary for LEAP, or if you set:
-	#
-	#  proxy_tunneled_request_as_eap = no
-	#
-	eap
-}
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting b/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting
deleted file mode 100644
index 9bf8697..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting
+++ /dev/null
@@ -1,167 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	This is a sample configuration for robust proxy accounting.
-#	accounting packets are proxied, OR logged locally if all
-#	home servers are down.  When the home servers come back up,
-#	the accounting packets are forwarded.
-#
-#	This method enables the server to proxy all packets to the
-#	home servers when they're up, AND to avoid writing to the
-#	detail file in most situations.
-#
-#	In most situations, proxying of accounting messages is done
-#	in a "pass-through" fashion.  If the home server does not
-#	respond, then the proxy server does not respond to the NAS.
-#	That means that the NAS must retransmit packets, sometimes
-#	forever.  This example shows how the proxy server can still
-#	respond to the NAS, even if all home servers are down.
-#
-#	This configuration could be done MUCH more simply if ALL
-#	packets were written to the detail file.  But that would
-#	involve a lot more disk writes, which may not be a good idea.
-#
-#	This file is NOT meant to be used as-is.  It needs to be
-#	edited to match your local configuration.
-#
-#	$Id$
-#
-######################################################################
-
-#  (1) Define two home servers.
-home_server home1.example.com {
-	type = acct
-	ipaddr = 192.0.2.10
-	port = 1813
-	secret = testing123
-
-	#  Mark this home server alive ONLY when it starts being responsive
-	status_check = request
-	username = "test_user_status_check"
-
-	#  Set the response timeout aggressively low.
-	#  You MAY have to increase this, depending on tests with
-	#  your local installation.
-	response_window = 6
-}
-
-home_server home2.example.com {
-	type = acct
-	ipaddr = 192.0.2.20
-	port = 1813
-	secret = testing123
-
-	#  Mark this home server alive ONLY when it starts being responsive
-	status_check = request
-	username = "test_user_status_check"
-
-	#  Set the response timeout aggressively low.
-	#  You MAY have to increase this, depending on tests with
-	#  your local installation.
-	response_window = 6
-}
-
-#  (2) Define a virtual server to be used when both of the
-#  home servers are down.
-home_server acct_detail.example.com {
-	virtual_server = acct_detail.example.com
-}
-
-#  Put all of the servers into a pool.
-home_server_pool acct_pool.example.com {
-	type = load-balance	# other types are OK, too.
-
-	home_server = home1.example.com
-	home_server = home2.example.com
-	# add more home_server's here.
-
-	# If all home servers are down, try a home server that
-	# is a local virtual server.
-	fallback = acct_detail.example.com
-
-	# for pre/post-proxy policies
-	virtual_server = home.example.com
-}
-
-#  (3) Define a realm for these home servers.
-#  It should NOT be used as part of normal proxying decisions!
-realm acct_realm.example.com {
-	acct_pool = acct_pool.example.com
-}
-
-#  (4) Define a detail file writer.
-#   See raddb/modules/detail.example.com
-
-#  (5) Define the virtual server to write the packets to the detail file
-#  This will be called when ALL home servers are down, because of the
-#  "fallback" configuration in the home server pool.
-server acct_detail.example.com {
-	accounting {
-		detail.example.com
-	}
-}
-
-#  (6) Define a virtual server to handle pre/post-proxy re-writing
-server home.example.com {
-	pre-proxy {
-		#  Insert pre-proxy rules here
-	}
-
-	post-proxy {
-		#  Insert post-proxy rules here
-
-		#  This will be called when the CURRENT packet failed
-		#  to be proxied.  This may happen when one home server
-		#  suddenly goes down, even though another home server
-		#  may be alive.
-		#
-		#  i.e. the current request has run out of time, so it
-		#  cannot fail over to another (possibly) alive server.
-		#
-		#  We want to respond to the NAS, so that it can stop
-		#  re-sending the packet.  We write the packet to the
-		#  "detail" file, where it will be read, and sent to
-		#  another home server.
-		#
-		Post-Proxy-Type Fail {
-			detail.example.com
-		}
-	}
-
-
-	#  Read accounting packets from the detail file(s) for
-	#  the home server.
-	#
-	#  Note that you can have only ONE "listen" section reading
-	#  detail files from a particular directory.  That is why the
-	#  destination host name is used as part of the directory name
-	#  below.  Having two "listen" sections reading detail files
-	#  from the same directory WILL cause problems.  The packets
-	#  may be read by one, the other, or both "listen" sections.
-	listen {
-		type = detail
-		filename = "${radacctdir}/detail.example.com/detail-*:*"
-		load_factor = 10
-	}
-
-	#  All packets read from the detail file are proxied back to
-	#  the home servers.
-	#
-	#  The normal pre/post-proxy rules are applied to them, too.
-	#
-	#  If the home servers are STILL down, then the server stops
-	#  reading the detail file, and queues the packets for a later
-	#  retransmission.  The Post-Proxy-Type "Fail" handler is NOT
-	#  called.
-	#
-	#  When the home servers come back up, the packets are forwarded,
-	#  and the detail file processed as normal.
-	accounting {
-		# You may want accounting policies here...
-
-		update control {
-			Proxy-To-Realm := "acct_realm.example.com"
-		}
-	}
-
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/soh b/src/test/setup/radius-config/freeradius/sites-available/soh
deleted file mode 100644
index 9196e5b..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/soh
+++ /dev/null
@@ -1,34 +0,0 @@
-# This is a simple server for the MS SoH requests generated by the
-# peap module - see "eap.conf" for more info
-
-# Requests are ONLY passed through the authorize section, and cannot
-# current be proxied (in any event, the radius attributes used are
-# internal).
-
-server soh-server {
-	authorize {
-		if (SoH-Supported == no) {
-			# client NAKed our request for SoH - not supported, or turned off
-			update config {
-				Auth-Type = Accept
-			}
-		}
-		else {
-			# client replied; check something - this is a local policy issue!
-			if (SoH-MS-Windows-Health-Status =~ /antivirus (warn|error) /) {
-				update config {
-					Auth-Type = Reject
-				}
-				update reply {
-					Reply-Message = "You must have antivirus enabled & installed!"
-				}
-			}
-			else {
-				update config {
-					Auth-Type = Accept
-				}
-			}
-		}
-	}
-}
-
diff --git a/src/test/setup/radius-config/freeradius/sites-available/status b/src/test/setup/radius-config/freeradius/sites-available/status
deleted file mode 100644
index 5432203..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/status
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	A virtual server to handle ONLY Status-Server packets.
-#
-#	Server statistics can be queried with a properly formatted
-#	Status-Server request.  See dictionary.freeradius for comments.
-#
-#	If radiusd.conf has "status_server = yes", then any client
-#	will be able to send a Status-Server packet to any port
-#	(listen section type "auth", "acct", or "status"), and the
-#	server will respond.
-#
-#	If radiusd.conf has "status_server = no", then the server will
-#	ignore Status-Server packets to "auth" and "acct" ports.  It
-#	will respond only if the Status-Server packet is sent to a
-#	"status" port.
-#
-#	The server statistics are available ONLY on socket of type
-#	"status".  Queries for statistics sent to any other port
-#	are ignored.
-#
-#	Similarly, a socket of type "status" will not process
-#	authentication or accounting packets.  This is for security.
-#
-#	$Id: e7d4346310b837d56bffe4c991b4e5680742ebc0 $
-#
-######################################################################
-
-server status {
-	listen {
-		#  ONLY Status-Server is allowed to this port.
-		#  ALL other packets are ignored.
-		type = status
-
-		ipaddr = 127.0.0.1
-		port = 18121
-	}
-
-	#
-	#  We recommend that you list ONLY management clients here.
-	#  i.e. NOT your NASes or Access Points, and for an ISP,
-	#  DEFINITELY not any RADIUS servers that are proxying packets
-	#  to you.
-	#
-	#  If you do NOT list a client here, then any client that is
-	#  globally defined (i.e. all of them) will be able to query
-	#  these statistics.
-	#
-	#  Do you really want your partners seeing the internal details
-	#  of what your RADIUS server is doing?
-	#
-	client admin {
-		ipaddr = 127.0.0.1
-		secret = adminsecret
-	}
-
-	#
-	#  Simple authorize section.  The "Autz-Type Status-Server"
-	#  section will work here, too.  See "raddb/sites-available/default".
-	authorize {
-		ok
-
-		# respond to the Status-Server request.
-		Autz-Type Status-Server {
-			ok
-		}
-	}
-}
-
-#	Statistics can be queried via a number of methods:
-#
-#	All packets received/sent by the server (1 = auth, 2 = acct)
-#		FreeRADIUS-Statistics-Type = 3
-#
-#	All packets proxied by the server (4 = proxy-auth, 8 = proxy-acct)
-#		FreeRADIUS-Statistics-Type = 12
-#
-#	All packets sent && received:
-#		FreeRADIUS-Statistics-Type = 15
-#
-#	Internal server statistics:
-#		FreeRADIUS-Statistics-Type = 16
-#
-#	All packets for a particular client (globally defined)
-#		FreeRADIUS-Statistics-Type = 35
-#		FreeRADIUS-Stats-Client-IP-Address = 192.0.2.1
-#
-#	All packets for a client attached to a "listen" ip/port
-#		FreeRADIUS-Statistics-Type = 35
-#		FreeRADIUS-Stats-Client-IP-Address = 192.0.2.1
-#		FreeRADIUS-Stats-Server-IP-Address = 127.0.0.1
-#		FreeRADIUS-Stats-Server-Port = 1812
-#
-#	All packets for a "listen" IP/port
-#		FreeRADIUS-Statistics-Type = 67
-#		FreeRADIUS-Stats-Server-IP-Address = 127.0.0.1
-#		FreeRADIUS-Stats-Server-Port = 1812
-#
-#	All packets for a home server IP / port
-#		FreeRADIUS-Statistics-Type = 131
-#		FreeRADIUS-Stats-Server-IP-Address = 192.0.2.2
-#		FreeRADIUS-Stats-Server-Port = 1812
-
-#
-#  You can also get exponentially weighted moving averages of
-#  response times (in usec) of home servers.  Just set the config
-#  item "historic_average_window" in a home_server section.
-#
-#  By default it is zero (don't calculate it).  Useful values
-#  are between 100, and 10,000.  The server will calculate and
-#  remember the moving average for this window, and for 10 times
-#  that window.
-#
-
-#
-#  Some of this could have been simplified.  e.g. the proxy-auth and
-#  proxy-acct bits aren't completely necessary.  But using them permits
-#  the server to be queried for ALL inbound && outbound packets at once.
-#  This gives a good snapshot of what the server is doing.
-#
-#  Due to internal limitations, the statistics might not be exactly up
-#  to date.  Do not expect all of the numbers to add up perfectly.
-#  The Status-Server packets are also counted in the total requests &&
-#  responses.  The responses are counted only AFTER the response has
-#  been sent.
-#
diff --git a/src/test/setup/radius-config/freeradius/sites-available/tls b/src/test/setup/radius-config/freeradius/sites-available/tls
deleted file mode 100644
index 0874951..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/tls
+++ /dev/null
@@ -1,506 +0,0 @@
-######################################################################
-#
-#  Initial implementation of RADIUS over TLS (radsec)
-#
-######################################################################
-
-listen {
-	ipaddr = *
-	port = 2083
-
-	#
-	#  TCP and TLS sockets can accept Access-Request and
-	#  Accounting-Request on the same socket.
-	#
-	#	auth	  = only Access-Request
-	#	acct	  = only Accounting-Request
-	#	auth+acct = both
-	#
-	type = auth+acct
-
-	# For now, only TCP transport is allowed.
-	proto = tcp
-
-	# Send packets to the default virtual server
-	virtual_server = default
-
-	clients = radsec
-
-	#
-	#  Connection limiting for sockets with "proto = tcp".
-	#
-	limit {
-	      #
-	      #  Limit the number of simultaneous TCP connections to the socket
-	      #
-	      #  The default is 16.
-	      #  Setting this to 0 means "no limit"
-	      max_connections = 16
-
-	      #  The per-socket "max_requests" option does not exist.
-
-	      #
-	      #  The lifetime, in seconds, of a TCP connection.  After
-	      #  this lifetime, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "forever".
-	      lifetime = 0
-
-	      #
-	      #  The idle timeout, in seconds, of a TCP connection.
-	      #  If no packets have been received over the connection for
-	      #  this time, the connection will be closed.
-	      #
-	      #  Setting this to 0 means "no timeout".
-	      #
-	      #  We STRONGLY RECOMMEND that you set an idle timeout.
-	      #
-	      idle_timeout = 30
-	}
-
-	#  This is *exactly* the same configuration as used by the EAP-TLS
-	#  module.  It's OK for testing, but for production use it's a good
-	#  idea to use different server certificates for EAP and for RADIUS
-	#  transport.
-	#
-	#  If you want only one TLS configuration for multiple sockets,
-	#  then we suggest putting "tls { ...}" into radiusd.conf.
-	#  The subsection below can then be changed into a reference:
-	#
-	#	tls = ${tls}
-	#
-	#  Which means "the tls sub-section is not here, but instead is in
-	#  the top-level section called 'tls'".
-	#
-	#  If you have multiple tls configurations, you can put them into
-	#  sub-sections of a top-level "tls" section.  There's no need to
-	#  call them all "tls".  You can then use:
-	#
-	#	tls = ${tls.site1}
-	#
-	#  to refer to the "site1" sub-section of the "tls" section.
-	#
-	tls {
-		private_key_password = whatever
-		private_key_file = ${certdir}/server.pem
-
-		#  If Private key & Certificate are located in
-		#  the same file, then private_key_file &
-		#  certificate_file must contain the same file
-		#  name.
-		#
-		#  If ca_file (below) is not used, then the
-		#  certificate_file below MUST include not
-		#  only the server certificate, but ALSO all
-		#  of the CA certificates used to sign the
-		#  server certificate.
-		certificate_file = ${certdir}/server.pem
-
-		#  Trusted Root CA list
-		#
-		#  ALL of the CA's in this list will be trusted
-		#  to issue client certificates for authentication.
-		#
-		#  In general, you should use self-signed
-		#  certificates for 802.1x (EAP) authentication.
-		#  In that case, this CA file should contain
-		#  *one* CA certificate.
-		#
-		#  This parameter is used only for EAP-TLS,
-		#  when you issue client certificates.  If you do
-		#  not use client certificates, and you do not want
-		#  to permit EAP-TLS authentication, then delete
-		#  this configuration item.
-		ca_file = ${cadir}/ca.pem
-
-		#
-		#  For DH cipher suites to work, you have to
-		#  run OpenSSL to create the DH file first:
-		#
-		#  	openssl dhparam -out certs/dh 1024
-		#
-		dh_file = ${certdir}/dh
-
-		#
-		#  If your system doesn't have /dev/urandom,
-		#  you will need to create this file, and
-		#  periodically change its contents.
-		#
-		#  For security reasons, FreeRADIUS doesn't
-		#  write to files in its configuration
-		#  directory.
-		#
-#		random_file = ${certdir}/random
-
-		#
-		#  The default fragment size is 1K.
-		#  However, it's possible to send much more data than
-		#  that over a TCP connection.  The upper limit is 64K.
-		#  Setting the fragment size to more than 1K means that
-		#  there are fewer round trips when setting up a TLS
-		#  connection.  But only if the certificates are large.
-		#
-		fragment_size = 8192
-
-		#  include_length is a flag which is
-		#  by default set to yes If set to
-		#  yes, Total Length of the message is
-		#  included in EVERY packet we send.
-		#  If set to no, Total Length of the
-		#  message is included ONLY in the
-		#  First packet of a fragment series.
-		#
-	#	include_length = yes
-
-		#  Check the Certificate Revocation List
-		#
-		#  1) Copy CA certificates and CRLs to same directory.
-		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
-		#    'c_rehash' is OpenSSL's command.
-		#  3) uncomment the line below.
-		#  5) Restart radiusd
-	#	check_crl = yes
-		ca_path = ${cadir}
-
-	       #
-	       #  If check_cert_issuer is set, the value will
-	       #  be checked against the DN of the issuer in
-	       #  the client certificate.  If the values do not
-	       #  match, the certificate verification will fail,
-	       #  rejecting the user.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-Issuer attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
-
-	       #
-	       #  If check_cert_cn is set, the value will
-	       #  be xlat'ed and checked against the CN
-	       #  in the client certificate.  If the values
-	       #  do not match, the certificate verification
-	       #  will fail rejecting the user.
-	       #
-	       #  This check is done only if the previous
-	       #  "check_cert_issuer" is not set, or if
-	       #  the check succeeds.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-CN attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#	check_cert_cn = %{User-Name}
-	#
-		# Set this option to specify the allowed
-		# TLS cipher suites.  The format is listed
-		# in "man 1 ciphers".
-		cipher_list = "DEFAULT"
-
-		#
-
-		#  This configuration entry should be deleted
-		#  once the server is running in a normal
-		#  configuration.  It is here ONLY to make
-		#  initial deployments easier.
-		#
-		#
-		#  This is enabled in eap.conf, so we don't need it here.
-		#
-#		make_cert_command = "${certdir}/bootstrap"
-
-		#
-		#  Session resumption / fast reauthentication
-		#  cache.
-		#
-		#  The cache contains the following information:
-		#
-		#  session Id - unique identifier, managed by SSL
-		#  User-Name  - from the Access-Accept
-		#  Stripped-User-Name - from the Access-Request
-		#  Cached-Session-Policy - from the Access-Accept
-		#
-		#  The "Cached-Session-Policy" is the name of a
-		#  policy which should be applied to the cached
-		#  session.  This policy can be used to assign
-		#  VLANs, IP addresses, etc.  It serves as a useful
-		#  way to re-apply the policy from the original
-		#  Access-Accept to the subsequent Access-Accept
-		#  for the cached session.
-		#
-		#  On session resumption, these attributes are
-		#  copied from the cache, and placed into the
-		#  reply list.
-		#
-		#  You probably also want "use_tunneled_reply = yes"
-		#  when using fast session resumption.
-		#
-		cache {
-		      #
-		      #  Enable it.  The default is "no".
-		      #  Deleting the entire "cache" subsection
-		      #  Also disables caching.
-		      #
-		      #  You can disallow resumption for a
-		      #  particular user by adding the following
-		      #  attribute to the control item list:
-		      #
-		      #		Allow-Session-Resumption = No
-		      #
-		      #  If "enable = no" below, you CANNOT
-		      #  enable resumption for just one user
-		      #  by setting the above attribute to "yes".
-		      #
-		      enable = no
-
-		      #
-		      #  Lifetime of the cached entries, in hours.
-		      #  The sessions will be deleted after this
-		      #  time.
-		      #
-		      lifetime = 24 # hours
-
-		      #
-		      #  The maximum number of entries in the
-		      #  cache.  Set to "0" for "infinite".
-		      #
-		      #  This could be set to the number of users
-		      #  who are logged in... which can be a LOT.
-		      #
-		      max_entries = 255
-
-		      #
-		      #  Internal "name" of the session cache.
-		      #  Used to distinguish which TLS context
-		      #  sessions belong to.
-		      #
-		      #  The server will generate a random value
-		      #  if unset. This will change across server
-		      #  restart so you MUST set the "name" if you
-		      #  want to persist sessions (see below).
-		      #
-		      #  If you use IPv6, change the "ipaddr" below
-		      #  to "ipv6addr"
-		      #
-		      #name = "TLS ${..ipaddr} ${..port} ${..proto}"
-
-		      #
-		      #  Simple directory-based storage of sessions.
-		      #  Two files per session will be written, the SSL
-		      #  state and the cached VPs. This will persist session
-		      #  across server restarts.
-		      #
-		      #  The server will need write perms, and the directory
-		      #  should be secured from anyone else. You might want
-		      #  a script to remove old files from here periodically:
-		      #
-		      #    find ${logdir}/tlscache -mtime +2 -exec rm -f {} \;
-		      #
-		      #  This feature REQUIRES "name" option be set above.
-		      #
-		      #persist_dir = "${logdir}/tlscache"
-		}
-
-		#
-		#  Require a client certificate.
-		#
-		require_client_cert = yes
-
-		#
-		#  As of version 2.1.10, client certificates can be
-		#  validated via an external command.  This allows
-		#  dynamic CRLs or OCSP to be used.
-		#
-		#  This configuration is commented out in the
-		#  default configuration.  Uncomment it, and configure
-		#  the correct paths below to enable it.
-		#
-		verify {
-			#  A temporary directory where the client
-			#  certificates are stored.  This directory
-			#  MUST be owned by the UID of the server,
-			#  and MUST not be accessible by any other
-			#  users.  When the server starts, it will do
-			#  "chmod go-rwx" on the directory, for
-			#  security reasons.  The directory MUST
-			#  exist when the server starts.
-			#
-			#  You should also delete all of the files
-			#  in the directory when the server starts.
-	#     		tmpdir = /tmp/radiusd
-
-			#  The command used to verify the client cert.
-			#  We recommend using the OpenSSL command-line
-			#  tool.
-			#
-			#  The ${..ca_path} text is a reference to
-			#  the ca_path variable defined above.
-			#
-			#  The %{TLS-Client-Cert-Filename} is the name
-			#  of the temporary file containing the cert
-			#  in PEM format.  This file is automatically
-			#  deleted by the server when the command
-			#  returns.
-	#    		client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}"
-		}
-	}
-}
-
-clients radsec {
-	client 127.0.0.1 {
-		ipaddr = 127.0.0.1
-
-		#
-		#  Ensure that this client is TLS *only*.
-		#
-		proto = tls
-
-		#
-		#  TCP clients can have any shared secret.
-		#
-		#  TLS clients MUST have the shared secret
-		#  set to "radsec".  Or, for "proto = tls",
-		#  you can omit the secret, and it will
-		#  automatically be set to "radsec".
-		#
-		secret = radsec
-
-		#
-		#  You can also use a "limit" section here.
-		#  See raddb/clients.conf for examples.
-		#
-		#  Note that BOTH limits are applied.  You
-		#  should therefore set the "listen" limits
-		#  higher than the ones for each individual
-		#  client.
-		#
-	}
-}
-
-home_server tls {
-	ipaddr = 127.0.0.1
-	port = 2083
-	type = auth
-	secret = testing123
-	proto = tcp
-	status_check = none
-
-	tls {
-		private_key_password = whatever
-		private_key_file = ${certdir}/client.pem
-
-		#  If Private key & Certificate are located in
-		#  the same file, then private_key_file &
-		#  certificate_file must contain the same file
-		#  name.
-		#
-		#  If ca_file (below) is not used, then the
-		#  certificate_file below MUST include not
-		#  only the server certificate, but ALSO all
-		#  of the CA certificates used to sign the
-		#  server certificate.
-		certificate_file = ${certdir}/client.pem
-
-		#  Trusted Root CA list
-		#
-		#  ALL of the CA's in this list will be trusted
-		#  to issue client certificates for authentication.
-		#
-		#  In general, you should use self-signed
-		#  certificates for 802.1x (EAP) authentication.
-		#  In that case, this CA file should contain
-		#  *one* CA certificate.
-		#
-		#  This parameter is used only for EAP-TLS,
-		#  when you issue client certificates.  If you do
-		#  not use client certificates, and you do not want
-		#  to permit EAP-TLS authentication, then delete
-		#  this configuration item.
-		ca_file = ${cadir}/ca.pem
-
-		#
-		#  For DH cipher suites to work, you have to
-		#  run OpenSSL to create the DH file first:
-		#
-		#  	openssl dhparam -out certs/dh 1024
-		#
-		dh_file = ${certdir}/dh
-		random_file = ${certdir}/random
-
-		#
-		#  The default fragment size is 1K.
-		#  However, TLS can send 64K of data at once.
-		#  It can be useful to set it higher.
-		#
-		fragment_size = 8192
-
-		#  include_length is a flag which is
-		#  by default set to yes If set to
-		#  yes, Total Length of the message is
-		#  included in EVERY packet we send.
-		#  If set to no, Total Length of the
-		#  message is included ONLY in the
-		#  First packet of a fragment series.
-		#
-	#	include_length = yes
-
-		#  Check the Certificate Revocation List
-		#
-		#  1) Copy CA certificates and CRLs to same directory.
-		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
-		#    'c_rehash' is OpenSSL's command.
-		#  3) uncomment the line below.
-		#  5) Restart radiusd
-	#	check_crl = yes
-		ca_path = ${cadir}
-
-	       #
-	       #  If check_cert_issuer is set, the value will
-	       #  be checked against the DN of the issuer in
-	       #  the client certificate.  If the values do not
-	       #  match, the certificate verification will fail,
-	       #  rejecting the user.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-Issuer attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
-
-	       #
-	       #  If check_cert_cn is set, the value will
-	       #  be xlat'ed and checked against the CN
-	       #  in the client certificate.  If the values
-	       #  do not match, the certificate verification
-	       #  will fail rejecting the user.
-	       #
-	       #  This check is done only if the previous
-	       #  "check_cert_issuer" is not set, or if
-	       #  the check succeeds.
-	       #
-	       #  In 2.1.10 and later, this check can be done
-	       #  more generally by checking the value of the
-	       #  TLS-Client-Cert-CN attribute.  This check
-	       #  can be done via any mechanism you choose.
-	       #
-	#	check_cert_cn = %{User-Name}
-	#
-		# Set this option to specify the allowed
-		# TLS cipher suites.  The format is listed
-		# in "man 1 ciphers".
-		cipher_list = "DEFAULT"
-	}
-
-}
-
-home_server_pool tls {
-		 type = fail-over
-		 home_server = tls
-}
-
-realm tls {
-      auth_pool = tls
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com b/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com
deleted file mode 100644
index b78a520..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	Sample virtual server for internally proxied requests.
-#
-#	See the "realm virtual.example.com" example in "proxy.conf".
-#
-#	$Id: 211daab3af0161aefa4990b137ba2739257f8326 $
-#
-######################################################################
-
-#
-#  Sample contents: just do everything that the default configuration does.
-#
-#  You WILL want to edit this to your local needs.  We suggest copying
-#  the "default" file here, and then editing it.  That way, any
-#  changes to the "default" file will not affect this virtual server,
-#  and vice-versa.
-#
-#  When this virtual server receives the request, the original
-#  attributes can be accessed as "outer.request", "outer.control", etc.
-#  See "man unlang" for more details.
-#
-server virtual.example.com {
-$INCLUDE	${confdir}/sites-available/default
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/vmps b/src/test/setup/radius-config/freeradius/sites-available/vmps
deleted file mode 100644
index 64d5e93..0000000
--- a/src/test/setup/radius-config/freeradius/sites-available/vmps
+++ /dev/null
@@ -1,98 +0,0 @@
-# -*- text -*-
-######################################################################
-#
-#	As of version 2.0.0, the server also supports the VMPS
-#	protocol.
-#
-#	$Id: 8703902cafb5cc2b869dc42da9f554da313825ad $
-#
-######################################################################
-
-server vmps {
-	listen {
-		# VMPS sockets only support IPv4 addresses.
-		ipaddr = *
-
-		#  Port on which to listen.
-		#  Allowed values are:
-		#	integer port number
-		#	1589 is the default VMPS port.
-		port = 1589
-
-		#  Type of packets to listen for.  Here, it is VMPS.
-		type = vmps
-
-		#  Some systems support binding to an interface, in addition
-		#  to the IP address.  This feature isn't strictly necessary,
-		#  but for sites with many IP addresses on one interface,
-		#  it's useful to say "listen on all addresses for
-		#  eth0".
-		#
-		#  If your system does not support this feature, you will
-		#  get an error if you try to use it.
-		#
-		#	interface = eth0
-	}
-
-	#  If you have switches that are allowed to send VMPS, but NOT
-	#  RADIUS packets, then list them here as "client" sections.
-	#
-	#  Note that for compatibility with RADIUS, you still have to
-	#  list a "secret" for each client, though that secret will not
-	#  be used for anything.
-
-
-	#  And the REAL contents.  This section is just like the
-	#  "post-auth" section of radiusd.conf.  In fact, it calls the
-	#  "post-auth" component of the modules that are listed here.
-	#  But it's called "vmps" to highlight that it's for VMPS.
-	#
-	vmps {
-		#
-		#  Some requests may not have a MAC address.  Try to
-		#  create one using other attributes.
-		if (!VMPS-Mac) {
-			if (VMPS-Ethernet-Frame =~ /0x.{12}(..)(..)(..)(..)(..)(..).*/) {
-				update request {
-					VMPS-Mac = "%{1}:%{2}:%{3}:%{4}:%{5}:%{6}"
-				}
-			}
-			else {
-				update request {
-					VMPS-Mac = "%{VMPS-Cookie}"
-				}
-			}
-		}
-
-		#  Do a simple mapping of MAC to VLAN.
-		#
-		#  See radiusd.conf for the definition of the "mac2vlan"
-		#  module.
-		#
-		#mac2vlan
-
-		# required VMPS reply attributes
-		update reply {
-			VMPS-Packet-Type = VMPS-Join-Response
-			VMPS-Cookie = "%{VMPS-Mac}"
-
-			VMPS-VLAN-Name = "please_use_real_vlan_here"
-
-			#
-			#  If you have VLAN's in a database, you can select
-			#  the VLAN name based on the MAC address.
-			#
-			#VMPS-VLAN-Name = "%{sql:select ... where mac='%{VMPS-Mac}'}"
-		}
-
-		# correct reply packet type for reconfirmation requests
-		#
-		if (VMPS-Packet-Type == VMPS-Reconfirm-Request){
-			update reply {
-				VMPS-Packet-Type := VMPS-Reconfirm-Response
-			}
-		}
-	}
-
-	# Proxying of VMPS requests is NOT supported.
-}
diff --git a/src/test/setup/radius-config/freeradius/sites-enabled/default b/src/test/setup/radius-config/freeradius/sites-enabled/default
deleted file mode 120000
index 6d9ba33..0000000
--- a/src/test/setup/radius-config/freeradius/sites-enabled/default
+++ /dev/null
@@ -1 +0,0 @@
-../sites-available/default
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel b/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel
deleted file mode 120000
index 55aba6e..0000000
--- a/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel
+++ /dev/null
@@ -1 +0,0 @@
-../sites-available/inner-tunnel
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/start-radius.py b/src/test/setup/radius-config/freeradius/start-radius.py
deleted file mode 100755
index 76f76eb..0000000
--- a/src/test/setup/radius-config/freeradius/start-radius.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import sys
-sys.exit(os.system('sh -c radius'))
diff --git a/src/test/setup/radius-config/freeradius/templates.conf b/src/test/setup/radius-config/freeradius/templates.conf
deleted file mode 100644
index 22c0a09..0000000
--- a/src/test/setup/radius-config/freeradius/templates.conf
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- text -*-
-##
-## templates.conf -- configurations to be used in multiple places
-##
-##	$Id: 7b8b44e051c974c1a0a6e27a0cff50e621835df2 $
-
-######################################################################
-#
-#  Version 2.0 has a useful new feature called "templates".
-#
-#  Use templates by adding a line in radiusd.conf:
-#
-#	$INCLUDE templates.conf
-#
-#  The goal of the templates is to have common configuration located
-#  in this file, and to list only the *differences* in the individual
-#  sections.  This feature is most useful for sections like "clients"
-#  or "home_servers", where many may be defined, and each one has
-#  similar repeated configuration.
-#
-#  Something similar to templates can be done by putting common
-#  configuration into separate files, and using "$INCLUDE file...",
-#  but this is more flexible, and simpler to understand.  It's also
-#  cheaper for the server, because "$INCLUDE" makes a copy of the
-#  configuration for inclusion, and templates are simply referenced.
-#
-#  The templates are defined in the "templates" section, so that they
-#  do not affect the rest of the server configuration.
-#
-#  A section can reference a template by using "$template name"
-#
-templates {
-	#
-	#  The contents of the templates section are other
-	#  configuration sections that would normally go into
-	#  the configuration files.
-	#
-
-	#
-	#  This is a default template for the "home_server" section.
-	#  Note that there is no name for the section.
-	#
-	#  Any configuration item that is valid for a "home_server"
-	#  section is also valid here.  When a "home_server" section
-	#  is defined in proxy.conf, this section is referenced as
-	#  the template.
-	#
-	#  Configuration items that are explicitly listed in a
-	#  "home_server" section of proxy.conf are used in
-	#  preference to the configuration items listed here.
-	#
-	#  However, if a configuration item is NOT listed in a
-	#  "home_server" section of proxy.conf, then the value here
-	#  is used.
-	#
-	#  This functionality lets you put common configuration into
-	#  a template, and to put only the unique configuration
-	#  items in "proxy.conf".  Each section in proxy.conf can
-	#  then contain a line "$template home_server", which will
-	#  cause it to reference this template.
-	#
-	home_server {
-		response_window = 20
-		zombie_period = 40
-		revive_interval = 120
-		#
-		#  Etc.
-	}
-
-	#
-	#  You can also have named templates.  For example, if you
-	#  are proxying to 3 different home servers all at the same
-	#  site, with identical configurations (other than IP
-	#  addresses), you can use this named template.
-	#
-
-	#  Then, each "home_server" section in "proxy.conf" would
-	#  only list the IP address of that home server, and a
-	#  line saying
-	#
-	#		$template example_com
-	#
-	#  That would tell FreeRADIUS to look in the section below
-	#  for the rest of the configuration items.
-	#
-	#  For various reasons, you shouldn't have a "." in the template
-	#  name.  Doing so means that the server will be unable to find
-	#  the template.
-	#
-	example_com {
-		type = auth
-		port = 1812
-		secret = testing123
-		response_window = 20
-		#
-		# Etc...
-	}
-
-	#
-	#  You can have templates for other sections, too, but they
-	#  seem to be most useful for home_servers.
-	#
-	#  For now, you can use templates only for sections in
-	#  radiusd.conf, not sub-sections.  So you still have to use
-	#  the "$INCLUDE file.." method for things like defining
-	#  multiple "sql" modules, each with similar configuration.
-	#
-}
diff --git a/src/test/setup/radius-config/freeradius/trigger.conf b/src/test/setup/radius-config/freeradius/trigger.conf
deleted file mode 100644
index 77ca355..0000000
--- a/src/test/setup/radius-config/freeradius/trigger.conf
+++ /dev/null
@@ -1,260 +0,0 @@
-# -*- text -*-
-##
-## trigger.conf -- Events in the server can trigger a hook to be executed.
-##
-##	$Id: 5cbe8d7d8a09549c060748a582cd6ed359e0e999 $
-
-#
-#  The triggers are named as "type.subtype.value".  These names refer
-#  to subsections and then configuration items in the "trigger"
-#  section below.  When an event occurs, the trigger is executed.  The
-#  trigger is simply a program that is run, with optional arguments.
-#
-#  The server does not wait when a trigger is executed.  It is simply
-#  a "one-shot" event that is sent.
-#
-#  The trigger names should be self-explanatory.
-#
-
-#
-#  SNMP configuration.
-#
-#  For now, this is only for SNMP traps.
-#
-#  They are enabled by uncommenting (or adding) "$INCLUDE trigger.conf"
-#  in the main "radiusd.conf" file.
-#
-#  The traps *REQUIRE* that the files in the "mibs" directory be copied
-#  to the global mibs directory, usually /usr/share/snmp/mibs/.
-#  If this is not done, the "snmptrap" program has no idea what information
-#  to send, and will not work.  The MIB installation is *NOT* done as
-#  part of the default installation, so that step *MUST* be done manually.
-#
-#  The global MIB directory can be found by running the following command:
-#
-#	snmptranslate -Dinit_mib .1.3 2>&1 | grep MIBDIR | sed "s/' .*//;s/.* '//;s/.*://"
-#
-#  Or maybe just:
-#
-#	snmptranslate -Dinit_mib .1.3 2>&1 | grep MIBDIR
-#
-#  If you have copied the MIBs to that directory, you can test the
-#  FreeRADIUS MIBs by running the following command:
-#
-#	snmptranslate -m +FREERADIUS-NOTIFICATION-MIB -IR -On  serverStart
-#
-#  It should print out:
-#
-#	.1.3.6.1.4.1.11344.4.1.1
-#
-#  As always, run the server in debugging mode after enabling the
-#  traps.  You will see the "snmptrap" command being run, and it will
-#  print out any errors or issues that it encounters.  Those need to
-#  be fixed before running the server in daemon mode.
-#
-#  We also suggest running in debugging mode as the "radiusd" user, if
-#  you have "user/group" set in radiusd.conf.  The "snmptrap" program
-#  may behave differently when run as "root" or as the "radiusd" user.
-#
-snmp {
-	#
-	#  Configuration for SNMP traps / notifications
-	#
-	#  To disable traps, edit "radiusd.conf", and delete the line
-	#  which says "$INCUDE trigger.conf"
-	#
-	trap {
-		#
-		#  Absolute path for the "snmptrap" command, and
-		#  default command-line arguments.
-		#
-		#  You can disable traps by changing the command to
-		#  "/bin/echo".
-		#
-		cmd = "/usr/bin/snmptrap -v2c"
-
-		#
-		#  Community string
-		#
-		community = "public"
-
-		#
-		#  Agent configuration.
-		#
-		agent = "localhost ''"
-	}
-}
-
-#
-#  The "snmptrap" configuration defines the full command used to run the traps.
-#
-#  This entry should not be edited.  Instead, edit the "trap" section above.
-#
-snmptrap = "${snmp.trap.cmd} -c ${snmp.trap.community} ${snmp.trap.agent} FREERADIUS-NOTIFICATION-MIB"
-
-#
-#  The individual triggers are defined here.  You can disable one by
-#  deleting it, or by commenting it out.  You can disable an entire
-#  section of traps by deleting the section.
-#
-#  The entries below should not be edited.  For example, the double colons
-#  *must* immediately follow the ${snmptrap} reference.  Adding a space
-#  before the double colons  will break all SNMP traps.
-#
-#  However... the traps are just programs which are run when
-#  particular events occur.  If you want to replace a trap with
-#  another program, you can.  Just edit the definitions below, so that
-#  they run a program of your choice.
-#
-#  For example, you can leverage the "start/stop" triggers to run a
-#  program when the server starts, or when it stops.  But that will
-#  prevent the start/stop SNMP traps from working, of course.
-#
-trigger {
-	#
-	# Events in the server core
-	#
-	server {
-		# the server has just started
-		start = "${snmptrap}::serverStart"
-
-		# the server is about to stop
-		stop = "${snmptrap}::serverStop"
-
-		# The "max_requests" condition has been reached.
-		# This will trigger only once per 60 seconds.
-		max_requests = "${snmptrap}::serverMaxRequests"
-
-		# For events related to clients
-		client {
-			#  Added a new dynamic client
-			add = "/path/to/file %{Packet-Src-IP-Address}"
-
-			#  There is no event for when dynamic clients expire
-		}
-
-		# Events related to signals received.
-		signal {
-			# a HUP signal
-			hup = "${snmptrap}::signalHup"
-
-			# a TERM signal
-			term = "${snmptrap}::signalTerm"
-		}
-
-
-		# Events related to the thread pool
-		thread {
-		       # A new thread has been started
-		       start = "${snmptrap}::threadStart"
-
-		       # an existing thread has been stopped
-		       stop = "${snmptrap}::threadStop"
-
-		       # an existing thread is unresponsive
-		       unresponsive = "${snmptrap}::threadUnresponsive"
-
-		       # the "max_threads" limit has been reached
-		       max_threads = "${snmptrap}::threadMaxThreads"
-		}
-	}
-
-	# When a home server changes state.
-	# These traps are edge triggered.
-	home_server {
-		# common arguments: IP, port, identifier
-		args = "radiusAuthServerAddress a %{proxy-request:Packet-Dst-IP-Address} radiusAuthClientServerPortNumber i %{proxy-request:Packet-Dst-Port} radiusAuthServIdent s '%{home_server:instance}'"
-
-		# The home server has been marked "alive"
-		alive = "${snmptrap}::homeServerAlive ${args}"
-
-		# The home server has been marked "zombie"
-		zombie = "${snmptrap}::homeServerZombie ${args}"
-
-		# The home server has been marked "dead"
-		dead = "${snmptrap}::homeServerDead ${args}"
-	}
-
-	# When a pool of home servers changes state.
-	home_server_pool {
-		# common arguments
-		args = "radiusdConfigName s %{home_server:instance}"
-
-		# It has reverted to "normal" mode, where at least one
-		# home server is alive.
-		normal = "${snmptrap}::homeServerPoolNormal ${args}"
-
-		# It is in "fallback" mode, with all home servers "dead"
-		fallback = "${snmptrap}::homeServerPoolFallback ${args}"
-	}
-
-	#  Triggers for specific modules.  These are NOT in the module
-	#  configuration because they are global to all instances of the
-	#  module.  You can have module-specific triggers, by placing a
-	#  "trigger" subsection in the module configuration.
-	modules {
-		# Common arguments
-		args = "radiusdModuleName s ldap' radiusdModuleInstance s ''"
-
-		# The files module
-		files {
-			# The module has been HUP'd via radmin
-			hup = "${snmptrap}::serverModuleHup ${..args}"
-
-			# Note that "hup" can be used for every module
-			# which can be HUP'd via radmin
-		}
-
-		# The LDAP module
-		ldap {
-			# Failed to open a new connection to the DB
-			fail = "${snmptrap}::serverModuleConnectionFail ${..args}"
-
-			# There are no "open", "close", or "none" setting.
-			# This is because the LDAP module re-connects and closes
-			# the connection for every "bind as user" query.
-		}
-
-		# The SQL module
-		sql {
-			# A new connection to the DB has been opened
-			open = "${snmptrap}::serverModuleConnectionUp ${..args}"
-
-			# A connection to the DB has been closed
-			close = "${snmptrap}::serverModuleConnectionDown ${..args}"
-
-			# Failed to open a new connection to the DB
-			fail = "${snmptrap}::serverModuleConnectionFail ${..args}"
-
-			# There are no DB handles available.
-			none = "${snmptrap}::serverModuleConnectionNone ${..args}"
-		}
-	}
-}
-
-#
-#  The complete list of triggers as generated from the source code is below.
-#
-#  These are the ONLY traps which are generated.  You CANNOT add new traps
-#  by defining them in one of the sections above.  New traps can be created
-#  only by edited both the source code to the server, *and* the MIBs.
-#  If you are not an expert in C and SNMP, then adding new traps will be
-#  difficult to create.
-#
-# home_server.alive
-# home_server.dead
-# home_server.zombie
-# home_server_pool.fallback
-# home_server_pool.normal
-# modules.*.hup
-# modules.ldap.fail
-# modules.sql.close
-# modules.sql.fail
-# modules.sql.none
-# modules.sql.open
-# server.client.add
-# server.max_requests
-# server.signal.hup
-# server.signal.term
-# server.start
-# server.stop
diff --git a/src/test/setup/radius-config/freeradius/users b/src/test/setup/radius-config/freeradius/users
deleted file mode 120000
index 7055798..0000000
--- a/src/test/setup/radius-config/freeradius/users
+++ /dev/null
@@ -1 +0,0 @@
-mods-config/files/authorize
\ No newline at end of file
diff --git a/src/test/setup/requirements.txt b/src/test/setup/requirements.txt
deleted file mode 100644
index 71676b7..0000000
--- a/src/test/setup/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-nose
-scapy==2.3.2
-monotonic
-configObj
-docker
-nsenter
-pyroute2
-netaddr
-python-daemon
-scapy-ssl_tls==1.2.2
-robotframework
-robotframework-requests
-robotframework-sshlibrary
-robotframework-httplibrary
-paramiko==2.3.1
-twisted
-pexpect
-apiclient
-pyyaml
-pyopenssl
-#python-libmaas
-#maasclient
-#maasutil
diff --git a/src/test/setup/syndicate-ms/Dockerfile b/src/test/setup/syndicate-ms/Dockerfile
deleted file mode 100644
index e74db92..0000000
--- a/src/test/setup/syndicate-ms/Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-# Syndicate Metadata Server
-# See also https://github.com/syndicate-storage/syndicate-docker
-
-FROM ubuntu:14.04.4
-MAINTAINER Zack Williams <zdw@cs.arizona.edu>
-
-# vars
-ENV APT_KEY butler_opencloud_cs_arizona_edu_pub.gpg
-ENV MS_PORT 8080
-ENV GAE_SDK google_appengine_1.9.35.zip
-
-# Prep apt to be able to download over https
-RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --force-yes\
-    apt-transport-https
-
-# copy over and trust https cert
-COPY butler.crt /usr/local/share/ca-certificates
-RUN update-ca-certificates
-
-# Install Syndicate MS
-COPY $APT_KEY /tmp/
-RUN apt-key add /tmp/$APT_KEY
-
-RUN echo "deb https://butler.opencloud.cs.arizona.edu/repos/release/syndicate syndicate main" > /etc/apt/sources.list.d/butler.list
-
-RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --force-yes\
-    syndicate-core \
-    syndicate-ms \
-    wget \
-    unzip
-
-# setup syndicate user
-RUN groupadd -r syndicate && useradd -m -r -g syndicate syndicate
-USER syndicate
-ENV HOME /home/syndicate
-WORKDIR $HOME
-
-# setup GAE
-RUN wget -nv https://storage.googleapis.com/appengine-sdks/featured/$GAE_SDK
-RUN unzip -q $GAE_SDK
-
-# Expose the MS port
-EXPOSE $MS_PORT
-
-# Create a storage location
-RUN mkdir $HOME/datastore
-
-# run the MS under GAE
-CMD $HOME/google_appengine/dev_appserver.py --admin_host=0.0.0.0 --host=0.0.0.0 --storage_path=$HOME/datastore --skip_sdk_update_check=true /usr/src/syndicate/ms
-
-
diff --git a/src/test/setup/syndicate-ms/Makefile b/src/test/setup/syndicate-ms/Makefile
deleted file mode 100644
index 2c24afc..0000000
--- a/src/test/setup/syndicate-ms/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-IMAGE_NAME:=xosproject/syndicate-ms
-CONTAINER_NAME:=xos-syndicate-ms
-NO_DOCKER_CACHE?=false
-
-.PHONY: build
-build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
-
-.PHONY: run
-run: ; docker run -d -p 8080:8080 --name ${CONTAINER_NAME} ${IMAGE_NAME}
-
-.PHONY: stop
-stop: ; docker stop ${CONTAINER_NAME}
-
-.PHONY: rm
-rm: ; docker rm ${CONTAINER_NAME}
-
-.PHONY: rmi
-rmi: ; docker rmi ${IMAGE_NAME}
-
diff --git a/src/test/setup/syndicate-ms/butler.crt b/src/test/setup/syndicate-ms/butler.crt
deleted file mode 100644
index be60161..0000000
--- a/src/test/setup/syndicate-ms/butler.crt
+++ /dev/null
@@ -1,37 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIGgjCCBWqgAwIBAgIRAJ26ZC+oEixlqDU7+7cazpIwDQYJKoZIhvcNAQELBQAw
-djELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1JMRIwEAYDVQQHEwlBbm4gQXJib3Ix
-EjAQBgNVBAoTCUludGVybmV0MjERMA8GA1UECxMISW5Db21tb24xHzAdBgNVBAMT
-FkluQ29tbW9uIFJTQSBTZXJ2ZXIgQ0EwHhcNMTYwMzIyMDAwMDAwWhcNMTkwMzIy
-MjM1OTU5WjCBqzELMAkGA1UEBhMCVVMxDjAMBgNVBBETBTg1NzIxMQswCQYDVQQI
-EwJBWjEPMA0GA1UEBxMGVHVjc29uMSIwIAYDVQQKExlUaGUgVW5pdmVyc2l0eSBv
-ZiBBcml6b25hMSAwHgYDVQQLExdDb21wdXRlciBTY2llbmNlICgwNDEyKTEoMCYG
-A1UEAxMfYnV0bGVyLm9wZW5jbG91ZC5jcy5hcml6b25hLmVkdTCCAiIwDQYJKoZI
-hvcNAQEBBQADggIPADCCAgoCggIBAKHUqBxVP6fvTm015n8hXfe53B2IHbMbkwCj
-6eqc2mak8PEVIoD1Ds2TlrvS6xWtFJfNdKlMTNQMh3dVjUC8xcB+OUdr1Q3qv9to
-qiUJC+kTnJNDtOqYqJzX9koH+tHD0zr5/cqyT4vLkJZJXiZ5NGKyHUeh9INTj/ZG
-yHHVrDiF5gUyNl7HrN53AMPpJAxO0rurN5tI3ozK8TE60sslVdxE5zWwnSGazS+0
-hcz7uIyDTpyuo8H6iA/F5L5/USLqAYHLTk10Hg/7vnbRMbaz6sdXPFm+gtZPm5mG
-L2P9I4GM6L/TBXL7+etUtPAgVMoYrdDGZ3wmWOrWukD6ax3BVaX+dJxFNUTju2MZ
-1By6nJIzBBezHE7j4dhjRDaGwsxmdvEjn8weoeWS8ngT3fnm6btFgzO0O2CC3QN9
-M6pk5kJGm8kyhcc8nX4gv/Tkl1gHAd9VNgEJPY3YFXWigtjK7fSYGe9GDQsploUG
-OubK5S8eelSej1u9XW/NgqdxwgQWmxeppWxSwWb4wVyunVX03UHFmk6XnSdtF54E
-iy8VIuItRYyZGni8gAyCx8z6ke2zd8+wWgzsjxQ3dHjbLFxV1O57ZyNyb8TuZ5hk
-0QoJqdR0X6EXc+z4+tV+yYQGQZ5L3vgz7REp3TnlgG8acp3JfZpH8gng05cX6sBi
-I+NbZEmPAgMBAAGjggHTMIIBzzAfBgNVHSMEGDAWgBQeBaN3j2yW4luHS6a0hqxx
-AAznODAdBgNVHQ4EFgQUDfCqsiaVDm70iLaq32jUEmKr9pIwDgYDVR0PAQH/BAQD
-AgWgMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
-MGcGA1UdIARgMF4wUgYMKwYBBAGuIwEEAwEBMEIwQAYIKwYBBQUHAgEWNGh0dHBz
-Oi8vd3d3LmluY29tbW9uLm9yZy9jZXJ0L3JlcG9zaXRvcnkvY3BzX3NzbC5wZGYw
-CAYGZ4EMAQICMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwuaW5jb21tb24t
-cnNhLm9yZy9JbkNvbW1vblJTQVNlcnZlckNBLmNybDB1BggrBgEFBQcBAQRpMGcw
-PgYIKwYBBQUHMAKGMmh0dHA6Ly9jcnQudXNlcnRydXN0LmNvbS9JbkNvbW1vblJT
-QVNlcnZlckNBXzIuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1
-c3QuY29tMCoGA1UdEQQjMCGCH2J1dGxlci5vcGVuY2xvdWQuY3MuYXJpem9uYS5l
-ZHUwDQYJKoZIhvcNAQELBQADggEBACUaI/yYc0pxAuwIWi0985f06MdKEMJo+qEO
-YLXENApQrJhTPdV9OaChlzI4x2ExmffPZEmhyD0q7z57mT9QkBYQwEJqwbRqfY2v
-0iQ4nLLkyXh7SJSS7J4WSG+cFEN1nFZ8/YGg/TD8spEIPeUGsUvRoJmJm9z90uqd
-+ETDc+79TZHxserOY3AJtlvzPScJa1HAqgDJGzgwGdUn82+bKZF5WGsGbfwUS6uS
-Ua2SsOxVZOn5ukF2g9vYs3dcO8u5ITAWrR1s6ACg/wGxvfvXwazpeiFx/RxilpcV
-6W7mTwbE76ZbkafrXbnZ6ihhIPARsVJhJsnClnf5OM7IqrX5g80=
------END CERTIFICATE-----
diff --git a/src/test/setup/syndicate-ms/butler_opencloud_cs_arizona_edu_pub.gpg b/src/test/setup/syndicate-ms/butler_opencloud_cs_arizona_edu_pub.gpg
deleted file mode 100644
index 92a2ae4..0000000
--- a/src/test/setup/syndicate-ms/butler_opencloud_cs_arizona_edu_pub.gpg
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQINBFb+uuwBEADgmbb2CPnQ2LofLdx5rJN4O75TAjYjJAPyyyIZL2bKmhhuRYwK
-a/gZAlOy5Y/4o5pRgG5s1BFkrSvWRIP+Y3D+PHz7wppjlo31NGm4+34stzlzGu4K
-EEUZpCiUiD1tCxX/H9jZTo5Dm2YvdLxnkWSkbf1ZkIzwNjM3bnYily2a/1NwMmqt
-18Hsy+3ivvUEZO0FmO2reP1l7Eb0hLR2QPxSA4/PxQ81+EJ3CObRYaUZ9KjgIRah
-eyP+PsXaFnxkoikGHod9ll2iWPzpkOUh+xXAu73YK4ikCrIUZ5Oe98Euja8h856H
-xiRRLGVL3iqzgAQJxG/0cXbiobN7bNYGlvLLyp+qRNbmgSYonsJxON4aVG+wjiLi
-gYCOQ/FQT0tYGeDprPBWRj6iGiic6K7W9BDXkxPqlYIYomMrjrqW5kX0YGMp+V7c
-2QG3yfh4+3pfpM+ZYfrAtCdgklYmCYBhoaieMrjIYw31PWqMuzxeb3xBS6++6ksH
-d9TlJKLgJ1UPiKLgDOEyIbYVWhPs2sQoRRstuKfPF9Gdv0UUAnqlyA8siVrvZfB2
-7D05PM4mv83GshoZ8ZAkV7uS6PFJIg6JM11dUM50LTfvHe7ig93CBvbFzm+RqxjQ
-JYf1XWd19912TW7NcNz6lg5jxEYLh8WYJin2xC2aLLb+hpy5NHE/Ien2aQARAQAB
-tCVPcGVuQ2xvdWQgQnV0bGVyIDxzaXRlc0BvcGVuY2xvdWQudXM+iQI9BBMBCgAn
-BQJW/rrsAhsDBQkDwmcABQsJCAcDBRUKCQgLBRYDAgEAAh4BAheAAAoJELvMx3QD
-/Cyyc2UQAIw2A8qrNMQt4skrR/87uKQjfJ/OXC7MEBDTLSL0Ed0VIuRrA/E1s1D/
-YJpdsFfKJyDbZ2Id25L+1QclvEjnsEDCIiURGcRmXLLsqjHCw4N2C16P2JasQVWo
-i1lkqUHC8kCzvR75u+agzpn16Qhb8FqLQxBSxd8vhMEw2LnrjRsjHGwErKhpYfOg
-LFXyurKKBb4KYOLortICgcE3Wz6eqgbNInrTMrSOSf5P7nsPINCFTyemzUyT53IU
-07RmJwTOrcgqJR5klghHQnFXJBkB55EMvFLjUrL4dpnAmlbkKhyFX8aRsBD5Frt2
-93LkHWDa35SELEzfIQznIsfok1rHgDR8kAh7m+tEbmn/Qk3llJ7c/r4JqG0RVGfe
-OfYZDT4I12H6ZWIoLjktnAP4QlDf+olILEYAD0PvKEQU7sQpMmex5QBMt6vvGAj6
-RfPn1iFhUZdOPB7GyWtUn8hmBCEfLAoAAntgoW9NC+PI/chFrm6Nugjz60TbMMOd
-i4s5J998AuJeF2RJogIi61a4VYcprSMTkF5b8kxBhV4N4J5jJQEQxo3ztdw7USvj
-ce8/3/69mBT7rIXgk39FvqnSIz9SmyQ+wgLb94Gcpy1id64yab2P1LNm3pORafSN
-F59uVqgEv5W2g/frt5QMSBO06dvzNjStIV7/uxlOHuSNooIClr//
-=JFDF
------END PGP PUBLIC KEY BLOCK-----
diff --git a/src/test/setup/test_docker/Dockerfile b/src/test/setup/test_docker/Dockerfile
deleted file mode 100644
index e5c72a7..0000000
--- a/src/test/setup/test_docker/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM ubuntu:14.04
-MAINTAINER chetan@ciena.com
-
-RUN apt-get update 
-RUN apt-get -y install git python python-pip python-setuptools python-scapy tcpdump doxygen doxypy
-RUN easy_install nose
-RUN apt-get -y install openvswitch-common openvswitch-switch
-WORKDIR /root
-RUN mkdir ovs
-COPY ./openvswitch-2.4.0.tar.gz /root
-COPY ./build_ovs.sh /root/
-RUN /root/build_ovs.sh
-RUN apt-get -y install python-twisted python-sqlite sqlite3
-RUN pip install scapy-ssl_tls
-RUN pip install -U scapy
-RUN pip install monotonic
-RUN mv /usr/sbin/tcpdump /sbin/
-RUN ln -sf /sbin/tcpdump /usr/sbin/tcpdump
-CMD ["/bin/bash"]
diff --git a/src/test/setup/test_docker/build_ovs.sh b/src/test/setup/test_docker/build_ovs.sh
deleted file mode 100755
index 879da31..0000000
--- a/src/test/setup/test_docker/build_ovs.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#!/usr/bin/env bash
-echo "OVS installation"
-cd /root/ && tar zxpvf openvswitch-2.4.0.tar.gz -C /root/ovs
-cd /root/ovs
-cd openvswitch-2.4.0 && ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install
-service openvswitch-controller stop
-service openvswitch-switch restart
diff --git a/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz b/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz
deleted file mode 100644
index 135022b..0000000
--- a/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz
+++ /dev/null
Binary files differ
diff --git a/src/test/setup/venv-apitests.sh b/src/test/setup/venv-apitests.sh
deleted file mode 100644
index 8c08b92..0000000
--- a/src/test/setup/venv-apitests.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-BASEDIR=$(pwd)
-VENVDIR=venv-xosapitest
-
-# create venv if it's not yet there
-if [ ! -d "$BASEDIR/$VENVDIR" ]; then
-    echo "Setting up Virtualenv for XOS API Tests"
-    virtualenv -q $BASEDIR/$VENVDIR --no-site-packages
-    echo "Virtualenv created."
-fi
-
-# activate the virtual env
-if [ ! $VIRTUAL_ENV ]; then
-    source $BASEDIR/$VENVDIR/bin/activate
-    echo "Virtualenv activated."
-fi
-
-# install pip packages
-pip install robotframework robotframework-sshlibrary robotframework-requests robotframework-httplibrary pexpect pyyaml
diff --git a/src/test/setup/voltha-test.py b/src/test/setup/voltha-test.py
deleted file mode 100755
index a08c662..0000000
--- a/src/test/setup/voltha-test.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from argparse import ArgumentParser
-import os
-import sys
-utils_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../utils')
-cli_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../cli')
-sys.path.append(utils_dir)
-sys.path.append(cli_dir)
-sys.path.insert(1, '/usr/local/lib/python2.7/dist-packages')
-from CordTestUtils import getstatusoutput
-import time
-import requests
-import httplib
-import json
-import signal
-
-class CordTesterWebClient(object):
-
-    def __init__(self, host = 'localhost', port = 5000):
-        self.host = host
-        self.port = port
-        self.rest = 'http://{}:{}'.format(self.host, self.port)
-
-    def get_config(self, test_case):
-        rest_uri = '{}/get'.format(self.rest)
-        config = { 'test_case' : test_case }
-        resp = requests.get(rest_uri, data = json.dumps(config))
-        if resp.ok and resp.status_code == 200:
-            config = resp.json()
-            return config
-        return None
-
-    def set_config(self, test_case, test_config):
-        rest_uri = '{}/update'.format(self.rest)
-        config = { 'test_case' : test_case, 'config' : test_config }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def restore_config(self, test_case):
-        rest_uri = '{}/restore'.format(self.rest)
-        config = { 'test_case' : test_case }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def start(self, manifest = 'manifest.json'):
-        rest_uri = '{}/start'.format(self.rest)
-        config = { 'manifest' : manifest }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def cleanup(self, manifest = 'manifest.json'):
-        rest_uri = '{}/cleanup'.format(self.rest)
-        config = { 'manifest' : manifest }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def test(self, test, manifest = 'manifest.json', test_config = None):
-        rest_uri = '{}/test'.format(self.rest)
-        config = { 'manifest' : manifest, 'test' : test }
-        if test_config:
-            config['config'] = test_config
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-class Tester(CordTesterWebClient):
-
-    def __init__(self, host = 'localhost', port = 5000):
-        super(Tester, self).__init__(host = host, port = port)
-
-    def execute(self, test_case, manifest = 'manifest.json', test_config = None):
-        print('Executing test %s' %test_case)
-        _, status = self.start(manifest = manifest)
-        assert status == httplib.OK, 'Test setup failed with status code %d' %status
-        _, status = self.test(test_case, manifest = manifest, test_config = test_config)
-        assert status == httplib.OK, 'Test run for test %s failed with status %d' %(test_case, status)
-        _, status = self.cleanup(manifest = manifest)
-        assert status == httplib.OK, 'Test cleanup failed with status %d' %status
-        print('Test executed successfully')
-
-
-class CordTesterWeb(object):
-
-    def __init__(self, args, start_in = 3):
-        self.args = args
-        self.tester = Tester()
-        self.start_in = start_in
-
-    def run(self):
-        manifest = self.args.manifest
-        olt_type = self.args.olt_type
-        test_type = self.args.test_type
-        disable_teardown = self.args.disable_teardown
-        test_mode = self.args.test_mode
-        disable_cleanup = self.args.disable_cleanup
-        if test_mode is True:
-            disable_cleanup = True
-        test_config = { 'VOLTHA_HOST' : self.args.voltha_host,
-                        'VOLTHA_OLT_TYPE' : self.args.olt_type,
-                        'VOLTHA_TEARDOWN' : not disable_teardown,
-                        }
-        if olt_type.startswith('tibit'):
-            test_config['VOLTHA_OLT_MAC'] = self.args.olt_arg
-        elif olt_type.startswith('maple'):
-            test_config['VOLTHA_OLT_IP'] = self.args.olt_arg
-        elif olt_type.startswith('ponsim'):
-            test_config['VOLTHA_PONSIM_HOST'] = self.args.olt_arg
-        else:
-            print('Unsupported OLT type %s' %olt_type)
-            return 127
-
-        if self.start_in:
-            time.sleep(self.start_in)
-
-        if test_mode is False:
-            _, status = self.tester.start(manifest = manifest)
-            assert status == httplib.OK, 'Test setup failed with status %d' %status
-
-        for test in test_type.split(','):
-            print('Running test case %s' %(test))
-            _, status = self.tester.test(test, manifest = manifest, test_config = test_config)
-            if status != httplib.OK:
-                print('Test case %s failed with status code %d' %(test, status))
-
-        if disable_cleanup is False:
-            print('Cleaning up the test')
-            self.tester.cleanup(manifest = manifest)
-        return 0 if status == httplib.OK else 127
-
-class CordTesterWebServer(object):
-
-    server_path = os.path.dirname(os.path.realpath(__file__))
-    server = 'webserver-run.py'
-    pattern = 'pgrep -f "python ./{}"'.format(server)
-
-    def running(self):
-        st, _ = getstatusoutput(self.pattern)
-        return True if st == 0 else False
-
-    def kill(self):
-        st, output = getstatusoutput(self.pattern)
-        if st == 0 and output:
-            pids = output.strip().splitlines()
-            for pid in pids:
-                try:
-                    os.kill(int(pid), signal.SIGKILL)
-                except:
-                    pass
-
-    def start(self):
-        if self.running() is False:
-            print('Starting CordTester Web Server')
-            cmd = 'cd {} && python ./{} &'.format(self.server_path, self.server)
-            os.system(cmd)
-
-def run_test(args):
-    testWebServer = CordTesterWebServer()
-    testWebServer.start()
-    testWeb = CordTesterWeb(args, start_in = 3)
-    status = testWeb.run()
-    testWebServer.kill()
-    return status
-
-if __name__ == '__main__':
-    parser = ArgumentParser(description = 'VOLTHA tester')
-    parser.add_argument('-test-type', '--test-type', default = 'tls:eap_auth_exchange.test_eap_tls', help = 'Test type to run')
-    parser.add_argument('-manifest', '--manifest', default='manifest-voltha.json', help = 'Manifest file to use')
-    parser.add_argument('-voltha-host', '--voltha-host', default='172.17.0.1', help = 'VOLTHA host ip')
-    parser.add_argument('-olt-type', '--olt-type', default = 'ponsim_olt', help = 'OLT type')
-    parser.add_argument('-olt-arg', '--olt-arg', default = '172.17.0.1', help = 'OLT type argument')
-    parser.add_argument('-disable-teardown', '--disable-teardown', action='store_true', help = 'Disable VOLTHA teardown')
-    parser.add_argument('-disable-cleanup', '--disable-cleanup', action='store_true', help = 'Dont cleanup cord-tester')
-    parser.add_argument('-test-mode', '--test-mode', action='store_true',
-                        help = 'Directly run the cord-tester run-test phase without setup and cleanup')
-
-    parser.set_defaults(func = run_test)
-    args = parser.parse_args()
-    res = args.func(args)
-    sys.exit(res)
diff --git a/src/test/setup/webserver-client-sample.py b/src/test/setup/webserver-client-sample.py
deleted file mode 100644
index 2199820..0000000
--- a/src/test/setup/webserver-client-sample.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import requests
-import json
-import httplib
-
-class CordTesterWebClient(object):
-
-    def __init__(self, host = 'localhost', port = 5000):
-        self.host = host
-        self.port = port
-        self.rest = 'http://{}:{}'.format(self.host, self.port)
-
-    def get_config(self, test_case):
-        rest_uri = '{}/get'.format(self.rest)
-        config = { 'test_case' : test_case }
-        resp = requests.get(rest_uri, data = json.dumps(config))
-        if resp.ok and resp.status_code == 200:
-            config = resp.json()
-            return config
-        return None
-
-    def set_config(self, test_case, test_config):
-        rest_uri = '{}/update'.format(self.rest)
-        config = { 'test_case' : test_case, 'config' : test_config }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def restore_config(self, test_case):
-        rest_uri = '{}/restore'.format(self.rest)
-        config = { 'test_case' : test_case }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def start(self, manifest = 'manifest.json'):
-        rest_uri = '{}/start'.format(self.rest)
-        config = { 'manifest' : manifest }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def cleanup(self, manifest = 'manifest.json'):
-        rest_uri = '{}/cleanup'.format(self.rest)
-        config = { 'manifest' : manifest }
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-    def test(self, test, manifest = 'manifest.json', test_config = None):
-        rest_uri = '{}/test'.format(self.rest)
-        config = { 'manifest' : manifest, 'test' : test }
-        if test_config:
-            config['config'] = test_config
-        resp = requests.post(rest_uri, data = json.dumps(config))
-        return resp.ok, resp.status_code
-
-class Tester(CordTesterWebClient):
-
-    def __init__(self, host = 'localhost', port = 5000):
-        super(Tester, self).__init__(host = host, port = port)
-
-    def execute(self, test_case, manifest = 'manifest.json', test_config = None):
-        print('Executing test %s' %test_case)
-        _, status = self.start(manifest = manifest)
-        assert status == httplib.OK, 'Test setup failed with status code %d' %status
-        _, status = self.test(test_case, manifest = manifest, test_config = test_config)
-        assert status == httplib.OK, 'Test run for test %s failed with status %d' %(test_case, status)
-        _, status = self.cleanup(manifest = manifest)
-        assert status == httplib.OK, 'Test cleanup failed with status %d' %status
-        print('Test executed successfully')
-
-if __name__ == '__main__':
-    tester = Tester()
-    tests = ('tls', 'igmp',)
-    for test in tests:
-        print('Getting config for test %s' %test)
-        config = tester.get_config(test)
-        print('%s' %config)
-
-    tls_cfg = { 'VOLTHA_OLT_MAC' : '00:0c:e2:31:10:00' }
-    igmp_cfg = { 'PORT_RX_DEFAULT' : 1, 'PORT_TX_DEFAULT' : 2, 'IGMP_TEST_TIMEOUT' : 10 }
-    manifest = 'manifest-ponsim.json'
-    tests = ( ('tls:eap_auth_exchange.test_eap_tls', tls_cfg, manifest),
-              ('igmp:igmp_exchange.test_igmp_join_verify_traffic', igmp_cfg, manifest),
-              )
-
-    print('Setting up the test with %s' %manifest)
-    _, status = tester.start(manifest = manifest)
-    assert status == httplib.OK, 'Test setup failed with status code %d' %status
-
-    for t, cfg, m in tests:
-        _, status = tester.test(t, manifest = m, test_config = cfg)
-        if status != httplib.OK:
-            print('Test case %s failed with status code %d' %(t, status))
-        else:
-            print('Test case %s executed successfully' %t)
-
-    print('Cleaning up the test with %s' %manifest)
-    tester.cleanup(manifest = manifest)
diff --git a/src/test/setup/webserver-get.curl b/src/test/setup/webserver-get.curl
deleted file mode 100755
index bdaf7f8..0000000
--- a/src/test/setup/webserver-get.curl
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-# example cord test web get to retrieve config from voltha test case
-
-curl  -H "Content-Type: application/json" -X GET -d '{"test_case" : "voltha"}' http://localhost:5000/get
\ No newline at end of file
diff --git a/src/test/setup/webserver-post.curl b/src/test/setup/webserver-post.curl
deleted file mode 100755
index 2ec2763..0000000
--- a/src/test/setup/webserver-post.curl
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-# example cord webserver post request to update config for cluster test
-
-cat > /tmp/testconfig.json <<EOF
-{
-"test_case" : "cluster",
- "config" :
- {
-      "TLS_TIMEOUT" : 10,
-      "ITERATIONS" : 50
- }
-}
-EOF
-curl -H "Content-Type: application/json" -X POST -d @/tmp/testconfig.json http://localhost:5000/update
-rm -f /tmp/testconfig.json
-read -p "Do you want to restore the config?" ans
-case "$ans" in
-  [Yy]*)
-         echo "Restoring test case config"
-         curl -H "Content-Type: application/json" -X POST -d '{ "test_case" : "cluster" }' http://localhost:5000/restore
-         ;;
-esac
diff --git a/src/test/setup/webserver-run.py b/src/test/setup/webserver-run.py
deleted file mode 100755
index a13ebd3..0000000
--- a/src/test/setup/webserver-run.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from webserver import app
-app.run(debug=True, port = 5000)
diff --git a/src/test/setup/webserver-test-tls.curl b/src/test/setup/webserver-test-tls.curl
deleted file mode 100755
index 6af5b95..0000000
--- a/src/test/setup/webserver-test-tls.curl
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-
-# example cord webserver post request to run tls test case
-
-cat > /tmp/testrun.json <<EOF
-{
-    "manifest" : "manifest.json",
-    "test" : "tls:eap_auth_exchange.test_eap_tls",
-    "config" : {
-        "VOLTHA_HOST" : "172.17.0.1",
-        "VOLTHA_REST_PORT" : 8882,
-        "VOLTHA_OLT_TYPE" : "ponsim_olt",
-        "VOLTHA_OLT_MAC" : "00:0c:e2:31:10:00"
-    }
-}
-EOF
-status=`curl -s -w "%{http_code}" -o /dev/null -H "Content-Type: application/json" -X POST -d '{ "manifest" : "manifest.json" }' http://localhost:5000/start`
-if [ $status -ne 200 ]; then
-  echo "Test setup failed with status code $status"
-  exit 1
-fi
-status=`curl -s -w "%{http_code}" -o /dev/null -H "Content-Type: application/json" -X POST -d @/tmp/testrun.json http://localhost:5000/test`
-if [ $status -ne 200 ]; then
-  echo "Test run failed with status code $status"
-  exit 1
-fi
-status=`curl -s -w "%{http_code}" -o /dev/null -H "Content-Type: application/json" -X POST -d '{ "manifest" : "manifest.json" }' http://localhost:5000/cleanup`
-if [ $status -ne 200 ]; then
-  echo "Test cleanup failed with status code $status"
-fi
-rm -f /tmp/testrun.json
-echo "Test successful"
diff --git a/src/test/setup/webserver/__init__.py b/src/test/setup/webserver/__init__.py
deleted file mode 100644
index ab0a820..0000000
--- a/src/test/setup/webserver/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from flask import Flask
-app = Flask(__name__)
-from webserver import cordTestConfig
diff --git a/src/test/setup/webserver/cordTestConfig.py b/src/test/setup/webserver/cordTestConfig.py
deleted file mode 100644
index bfe7c4c..0000000
--- a/src/test/setup/webserver/cordTestConfig.py
+++ /dev/null
@@ -1,192 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from webserver import app
-from flask import request, jsonify
-import httplib
-import json
-import os
-import sys
-import copy
-from collections import OrderedDict
-
-class CordTesterRun(object):
-    our_path = os.path.dirname(os.path.realpath(__file__))
-    exec_base = os.path.realpath(os.path.join(our_path, '..'))
-
-    @classmethod
-    def start(cls, manifest):
-        status = False
-        manifest_file = os.path.join(cls.exec_base, manifest)
-        if os.access(manifest_file, os.F_OK):
-            cmd = 'sudo {}/cord-test.py setup -m {}'.format(cls.exec_base, manifest_file)
-            ret = os.system(cmd)
-            status = True if ret == 0 else False
-
-        return status
-
-    @classmethod
-    def cleanup(cls, manifest):
-        status = False
-        manifest_file = os.path.join(cls.exec_base, manifest)
-        if os.access(manifest_file, os.F_OK):
-            cmd = 'sudo {}/cord-test.py cleanup -m {}'.format(cls.exec_base, manifest_file)
-            os.system(cmd)
-            status = True
-
-        return status
-
-    @classmethod
-    def test(cls, manifest, test, config = None):
-        manifest_file = os.path.join(cls.exec_base, manifest)
-        if not os.access(manifest_file, os.F_OK):
-            return False
-        #get test case as we could give a specific test to execute within a test case
-        test_case = test.split(':')[0]
-        cordWeb = CordTesterWebConfig(test_case)
-        if config:
-            status = cordWeb.update(config)
-            #test case is invalid
-            if status is False:
-                return status
-        cmd = 'sudo {}/cord-test.py run -m {} -t {}'.format(cls.exec_base, manifest_file, test)
-        ret = os.system(cmd)
-        status = True if ret == 0 else False
-        if config:
-            cordWeb.restore()
-        return status
-
-class CordTesterWebConfig(object):
-    our_path = os.path.dirname(os.path.realpath(__file__))
-    test_base = os.path.realpath(os.path.join(our_path, '..', '..'))
-    restore_config = {}
-
-    def __init__(self, test_case):
-        self.test_case = test_case
-        self.test_path = None
-        self.test_config = None
-        test_path = os.path.join(self.test_base, self.test_case)
-        if os.path.isdir(test_path):
-            self.test_path = test_path
-            self.test_config = os.path.join(self.test_path, '{}Test.json'.format(self.test_case))
-
-    def update(self, config):
-        cur_config = OrderedDict()
-        if self.test_config:
-            if os.access(self.test_config, os.F_OK):
-                with open(self.test_config, 'r') as f:
-                    cur_config = json.load(f, object_pairs_hook = OrderedDict)
-                self.save(copy.copy(cur_config))
-            for k, v in config.iteritems():
-                cur_config[k] = v
-            with open(self.test_config, 'w') as f:
-                json.dump(cur_config, f, indent = 4)
-            return True
-        return False
-
-    def save(self, cur_config):
-        self.restore_config[self.test_case] = cur_config
-
-    def restore(self):
-        config = None
-        if self.test_config:
-            if self.test_case in self.restore_config:
-                config = self.restore_config[self.test_case]
-                with open(self.test_config, 'w') as f:
-                    json.dump(config, f, indent = 4)
-                return True
-
-        return False
-
-    def get(self):
-        cur_config = {}
-        if self.test_config:
-            if os.access(self.test_config, os.F_OK):
-                with open(self.test_config) as f:
-                    cur_config = json.load(f, object_pairs_hook = OrderedDict)
-        return cur_config
-
-@app.route('/')
-@app.route('/index')
-def index():
-    return 'Welcome to Cord Tester configuration page'
-
-@app.route('/get')
-def get():
-    data = request.get_json(force = True)
-    test_case = data.get('test_case', None)
-    if test_case:
-        cordWeb = CordTesterWebConfig(test_case)
-        config = cordWeb.get()
-        return jsonify(config)
-    return ('', httplib.NOT_FOUND)
-
-@app.route('/update', methods = ['POST'])
-def update():
-    data = request.get_json(force = True)
-    test_case = data.get('test_case', None)
-    config = data.get('config', None)
-    response = ('', httplib.NOT_FOUND)
-    if test_case:
-        cordWeb = CordTesterWebConfig(test_case)
-        status = cordWeb.update(config)
-        if status:
-            response = ('', httplib.OK)
-
-    return response
-
-@app.route('/restore', methods = ['POST'])
-def restore():
-    data = request.get_json(force = True)
-    test_case = data.get('test_case', None)
-    response = ('', httplib.NOT_FOUND)
-    if test_case:
-        cordWeb = CordTesterWebConfig(test_case)
-        status = cordWeb.restore()
-        if status:
-            response = ('', httplib.OK)
-    return response
-
-@app.route('/start', methods = ['POST'])
-def start():
-    data = request.get_json(force = True)
-    manifest = data.get('manifest', 'manifest.json')
-    status = CordTesterRun.start(manifest)
-    if status:
-        return ('', httplib.OK)
-    return ('', httplib.NOT_ACCEPTABLE)
-
-@app.route('/cleanup', methods = ['POST'])
-def cleanup():
-    data = request.get_json(force = True)
-    manifest = data.get('manifest', 'manifest.json')
-    status = CordTesterRun.cleanup(manifest)
-    if status:
-        return ('', httplib.OK)
-    return ('', httplib.NOT_ACCEPTABLE)
-
-@app.route('/test', methods = ['POST'])
-def test():
-    data = request.get_json(force = True)
-    manifest = data.get('manifest', 'manifest.json')
-    test = data.get('test', None)
-    config = data.get('config', None)
-    if test is None:
-        return ('', httplib.NOT_FOUND)
-    status = CordTesterRun.test(manifest, test, config = config)
-    if status:
-        return ('', httplib.OK)
-    return ('', httplib.NOT_ACCEPTABLE)
diff --git a/src/test/subscriber/__init__.py b/src/test/subscriber/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/subscriber/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/subscriber/generate_portmap.py b/src/test/subscriber/generate_portmap.py
deleted file mode 100644
index 23ef1f3..0000000
--- a/src/test/subscriber/generate_portmap.py
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-##Generate a port map for 100 subscribers based on veth pairs
-import sys
-header = '''###This file is auto-generated. Do not EDIT###'''
-def generate_port_map(num = 100):
-    print("g_subscriber_port_map = {}")
-    for i in xrange(1, num+1):
-        intf = 'veth' + str(2*i-2)
-        print("g_subscriber_port_map[%d]='%s'" %(i, intf))
-        print("g_subscriber_port_map['%s']=%d" %(intf, i))
-
-if __name__ == '__main__':
-    num = 100
-    if len(sys.argv) > 1:
-        num = int(sys.argv[1])
-    print(header)
-    generate_port_map(num)
diff --git a/src/test/subscriber/portmaps.py b/src/test/subscriber/portmaps.py
deleted file mode 100644
index ceb630c..0000000
--- a/src/test/subscriber/portmaps.py
+++ /dev/null
@@ -1,433 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-###This file is auto-generated. Do not EDIT###
-g_subscriber_port_map = {}
-g_subscriber_port_map[1]='veth0'
-g_subscriber_port_map['veth0']=1
-g_subscriber_port_map[2]='veth2'
-g_subscriber_port_map['veth2']=2
-g_subscriber_port_map[3]='veth4'
-g_subscriber_port_map['veth4']=3
-g_subscriber_port_map[4]='veth6'
-g_subscriber_port_map['veth6']=4
-g_subscriber_port_map[5]='veth8'
-g_subscriber_port_map['veth8']=5
-g_subscriber_port_map[6]='veth10'
-g_subscriber_port_map['veth10']=6
-g_subscriber_port_map[7]='veth12'
-g_subscriber_port_map['veth12']=7
-g_subscriber_port_map[8]='veth14'
-g_subscriber_port_map['veth14']=8
-g_subscriber_port_map[9]='veth16'
-g_subscriber_port_map['veth16']=9
-g_subscriber_port_map[10]='veth18'
-g_subscriber_port_map['veth18']=10
-g_subscriber_port_map[11]='veth20'
-g_subscriber_port_map['veth20']=11
-g_subscriber_port_map[12]='veth22'
-g_subscriber_port_map['veth22']=12
-g_subscriber_port_map[13]='veth24'
-g_subscriber_port_map['veth24']=13
-g_subscriber_port_map[14]='veth26'
-g_subscriber_port_map['veth26']=14
-g_subscriber_port_map[15]='veth28'
-g_subscriber_port_map['veth28']=15
-g_subscriber_port_map[16]='veth30'
-g_subscriber_port_map['veth30']=16
-g_subscriber_port_map[17]='veth32'
-g_subscriber_port_map['veth32']=17
-g_subscriber_port_map[18]='veth34'
-g_subscriber_port_map['veth34']=18
-g_subscriber_port_map[19]='veth36'
-g_subscriber_port_map['veth36']=19
-g_subscriber_port_map[20]='veth38'
-g_subscriber_port_map['veth38']=20
-g_subscriber_port_map[21]='veth40'
-g_subscriber_port_map['veth40']=21
-g_subscriber_port_map[22]='veth42'
-g_subscriber_port_map['veth42']=22
-g_subscriber_port_map[23]='veth44'
-g_subscriber_port_map['veth44']=23
-g_subscriber_port_map[24]='veth46'
-g_subscriber_port_map['veth46']=24
-g_subscriber_port_map[25]='veth48'
-g_subscriber_port_map['veth48']=25
-g_subscriber_port_map[26]='veth50'
-g_subscriber_port_map['veth50']=26
-g_subscriber_port_map[27]='veth52'
-g_subscriber_port_map['veth52']=27
-g_subscriber_port_map[28]='veth54'
-g_subscriber_port_map['veth54']=28
-g_subscriber_port_map[29]='veth56'
-g_subscriber_port_map['veth56']=29
-g_subscriber_port_map[30]='veth58'
-g_subscriber_port_map['veth58']=30
-g_subscriber_port_map[31]='veth60'
-g_subscriber_port_map['veth60']=31
-g_subscriber_port_map[32]='veth62'
-g_subscriber_port_map['veth62']=32
-g_subscriber_port_map[33]='veth64'
-g_subscriber_port_map['veth64']=33
-g_subscriber_port_map[34]='veth66'
-g_subscriber_port_map['veth66']=34
-g_subscriber_port_map[35]='veth68'
-g_subscriber_port_map['veth68']=35
-g_subscriber_port_map[36]='veth70'
-g_subscriber_port_map['veth70']=36
-g_subscriber_port_map[37]='veth72'
-g_subscriber_port_map['veth72']=37
-g_subscriber_port_map[38]='veth74'
-g_subscriber_port_map['veth74']=38
-g_subscriber_port_map[39]='veth76'
-g_subscriber_port_map['veth76']=39
-g_subscriber_port_map[40]='veth78'
-g_subscriber_port_map['veth78']=40
-g_subscriber_port_map[41]='veth80'
-g_subscriber_port_map['veth80']=41
-g_subscriber_port_map[42]='veth82'
-g_subscriber_port_map['veth82']=42
-g_subscriber_port_map[43]='veth84'
-g_subscriber_port_map['veth84']=43
-g_subscriber_port_map[44]='veth86'
-g_subscriber_port_map['veth86']=44
-g_subscriber_port_map[45]='veth88'
-g_subscriber_port_map['veth88']=45
-g_subscriber_port_map[46]='veth90'
-g_subscriber_port_map['veth90']=46
-g_subscriber_port_map[47]='veth92'
-g_subscriber_port_map['veth92']=47
-g_subscriber_port_map[48]='veth94'
-g_subscriber_port_map['veth94']=48
-g_subscriber_port_map[49]='veth96'
-g_subscriber_port_map['veth96']=49
-g_subscriber_port_map[50]='veth98'
-g_subscriber_port_map['veth98']=50
-g_subscriber_port_map[51]='veth100'
-g_subscriber_port_map['veth100']=51
-g_subscriber_port_map[52]='veth102'
-g_subscriber_port_map['veth102']=52
-g_subscriber_port_map[53]='veth104'
-g_subscriber_port_map['veth104']=53
-g_subscriber_port_map[54]='veth106'
-g_subscriber_port_map['veth106']=54
-g_subscriber_port_map[55]='veth108'
-g_subscriber_port_map['veth108']=55
-g_subscriber_port_map[56]='veth110'
-g_subscriber_port_map['veth110']=56
-g_subscriber_port_map[57]='veth112'
-g_subscriber_port_map['veth112']=57
-g_subscriber_port_map[58]='veth114'
-g_subscriber_port_map['veth114']=58
-g_subscriber_port_map[59]='veth116'
-g_subscriber_port_map['veth116']=59
-g_subscriber_port_map[60]='veth118'
-g_subscriber_port_map['veth118']=60
-g_subscriber_port_map[61]='veth120'
-g_subscriber_port_map['veth120']=61
-g_subscriber_port_map[62]='veth122'
-g_subscriber_port_map['veth122']=62
-g_subscriber_port_map[63]='veth124'
-g_subscriber_port_map['veth124']=63
-g_subscriber_port_map[64]='veth126'
-g_subscriber_port_map['veth126']=64
-g_subscriber_port_map[65]='veth128'
-g_subscriber_port_map['veth128']=65
-g_subscriber_port_map[66]='veth130'
-g_subscriber_port_map['veth130']=66
-g_subscriber_port_map[67]='veth132'
-g_subscriber_port_map['veth132']=67
-g_subscriber_port_map[68]='veth134'
-g_subscriber_port_map['veth134']=68
-g_subscriber_port_map[69]='veth136'
-g_subscriber_port_map['veth136']=69
-g_subscriber_port_map[70]='veth138'
-g_subscriber_port_map['veth138']=70
-g_subscriber_port_map[71]='veth140'
-g_subscriber_port_map['veth140']=71
-g_subscriber_port_map[72]='veth142'
-g_subscriber_port_map['veth142']=72
-g_subscriber_port_map[73]='veth144'
-g_subscriber_port_map['veth144']=73
-g_subscriber_port_map[74]='veth146'
-g_subscriber_port_map['veth146']=74
-g_subscriber_port_map[75]='veth148'
-g_subscriber_port_map['veth148']=75
-g_subscriber_port_map[76]='veth150'
-g_subscriber_port_map['veth150']=76
-g_subscriber_port_map[77]='veth152'
-g_subscriber_port_map['veth152']=77
-g_subscriber_port_map[78]='veth154'
-g_subscriber_port_map['veth154']=78
-g_subscriber_port_map[79]='veth156'
-g_subscriber_port_map['veth156']=79
-g_subscriber_port_map[80]='veth158'
-g_subscriber_port_map['veth158']=80
-g_subscriber_port_map[81]='veth160'
-g_subscriber_port_map['veth160']=81
-g_subscriber_port_map[82]='veth162'
-g_subscriber_port_map['veth162']=82
-g_subscriber_port_map[83]='veth164'
-g_subscriber_port_map['veth164']=83
-g_subscriber_port_map[84]='veth166'
-g_subscriber_port_map['veth166']=84
-g_subscriber_port_map[85]='veth168'
-g_subscriber_port_map['veth168']=85
-g_subscriber_port_map[86]='veth170'
-g_subscriber_port_map['veth170']=86
-g_subscriber_port_map[87]='veth172'
-g_subscriber_port_map['veth172']=87
-g_subscriber_port_map[88]='veth174'
-g_subscriber_port_map['veth174']=88
-g_subscriber_port_map[89]='veth176'
-g_subscriber_port_map['veth176']=89
-g_subscriber_port_map[90]='veth178'
-g_subscriber_port_map['veth178']=90
-g_subscriber_port_map[91]='veth180'
-g_subscriber_port_map['veth180']=91
-g_subscriber_port_map[92]='veth182'
-g_subscriber_port_map['veth182']=92
-g_subscriber_port_map[93]='veth184'
-g_subscriber_port_map['veth184']=93
-g_subscriber_port_map[94]='veth186'
-g_subscriber_port_map['veth186']=94
-g_subscriber_port_map[95]='veth188'
-g_subscriber_port_map['veth188']=95
-g_subscriber_port_map[96]='veth190'
-g_subscriber_port_map['veth190']=96
-g_subscriber_port_map[97]='veth192'
-g_subscriber_port_map['veth192']=97
-g_subscriber_port_map[98]='veth194'
-g_subscriber_port_map['veth194']=98
-g_subscriber_port_map[99]='veth196'
-g_subscriber_port_map['veth196']=99
-g_subscriber_port_map[100]='veth198'
-g_subscriber_port_map['veth198']=100
-g_subscriber_port_map[101]='veth200'
-g_subscriber_port_map['veth200']=101
-g_subscriber_port_map[102]='veth202'
-g_subscriber_port_map['veth202']=102
-g_subscriber_port_map[103]='veth204'
-g_subscriber_port_map['veth204']=103
-g_subscriber_port_map[104]='veth206'
-g_subscriber_port_map['veth206']=104
-g_subscriber_port_map[105]='veth208'
-g_subscriber_port_map['veth208']=105
-g_subscriber_port_map[106]='veth210'
-g_subscriber_port_map['veth210']=106
-g_subscriber_port_map[107]='veth212'
-g_subscriber_port_map['veth212']=107
-g_subscriber_port_map[108]='veth214'
-g_subscriber_port_map['veth214']=108
-g_subscriber_port_map[109]='veth216'
-g_subscriber_port_map['veth216']=109
-g_subscriber_port_map[110]='veth218'
-g_subscriber_port_map['veth218']=110
-g_subscriber_port_map[111]='veth220'
-g_subscriber_port_map['veth220']=111
-g_subscriber_port_map[112]='veth222'
-g_subscriber_port_map['veth222']=112
-g_subscriber_port_map[113]='veth224'
-g_subscriber_port_map['veth224']=113
-g_subscriber_port_map[114]='veth226'
-g_subscriber_port_map['veth226']=114
-g_subscriber_port_map[115]='veth228'
-g_subscriber_port_map['veth228']=115
-g_subscriber_port_map[116]='veth230'
-g_subscriber_port_map['veth230']=116
-g_subscriber_port_map[117]='veth232'
-g_subscriber_port_map['veth232']=117
-g_subscriber_port_map[118]='veth234'
-g_subscriber_port_map['veth234']=118
-g_subscriber_port_map[119]='veth236'
-g_subscriber_port_map['veth236']=119
-g_subscriber_port_map[120]='veth238'
-g_subscriber_port_map['veth238']=120
-g_subscriber_port_map[121]='veth240'
-g_subscriber_port_map['veth240']=121
-g_subscriber_port_map[122]='veth242'
-g_subscriber_port_map['veth242']=122
-g_subscriber_port_map[123]='veth244'
-g_subscriber_port_map['veth244']=123
-g_subscriber_port_map[124]='veth246'
-g_subscriber_port_map['veth246']=124
-g_subscriber_port_map[125]='veth248'
-g_subscriber_port_map['veth248']=125
-g_subscriber_port_map[126]='veth250'
-g_subscriber_port_map['veth250']=126
-g_subscriber_port_map[127]='veth252'
-g_subscriber_port_map['veth252']=127
-g_subscriber_port_map[128]='veth254'
-g_subscriber_port_map['veth254']=128
-g_subscriber_port_map[129]='veth256'
-g_subscriber_port_map['veth256']=129
-g_subscriber_port_map[130]='veth258'
-g_subscriber_port_map['veth258']=130
-g_subscriber_port_map[131]='veth260'
-g_subscriber_port_map['veth260']=131
-g_subscriber_port_map[132]='veth262'
-g_subscriber_port_map['veth262']=132
-g_subscriber_port_map[133]='veth264'
-g_subscriber_port_map['veth264']=133
-g_subscriber_port_map[134]='veth266'
-g_subscriber_port_map['veth266']=134
-g_subscriber_port_map[135]='veth268'
-g_subscriber_port_map['veth268']=135
-g_subscriber_port_map[136]='veth270'
-g_subscriber_port_map['veth270']=136
-g_subscriber_port_map[137]='veth272'
-g_subscriber_port_map['veth272']=137
-g_subscriber_port_map[138]='veth274'
-g_subscriber_port_map['veth274']=138
-g_subscriber_port_map[139]='veth276'
-g_subscriber_port_map['veth276']=139
-g_subscriber_port_map[140]='veth278'
-g_subscriber_port_map['veth278']=140
-g_subscriber_port_map[141]='veth280'
-g_subscriber_port_map['veth280']=141
-g_subscriber_port_map[142]='veth282'
-g_subscriber_port_map['veth282']=142
-g_subscriber_port_map[143]='veth284'
-g_subscriber_port_map['veth284']=143
-g_subscriber_port_map[144]='veth286'
-g_subscriber_port_map['veth286']=144
-g_subscriber_port_map[145]='veth288'
-g_subscriber_port_map['veth288']=145
-g_subscriber_port_map[146]='veth290'
-g_subscriber_port_map['veth290']=146
-g_subscriber_port_map[147]='veth292'
-g_subscriber_port_map['veth292']=147
-g_subscriber_port_map[148]='veth294'
-g_subscriber_port_map['veth294']=148
-g_subscriber_port_map[149]='veth296'
-g_subscriber_port_map['veth296']=149
-g_subscriber_port_map[150]='veth298'
-g_subscriber_port_map['veth298']=150
-g_subscriber_port_map[151]='veth300'
-g_subscriber_port_map['veth300']=151
-g_subscriber_port_map[152]='veth302'
-g_subscriber_port_map['veth302']=152
-g_subscriber_port_map[153]='veth304'
-g_subscriber_port_map['veth304']=153
-g_subscriber_port_map[154]='veth306'
-g_subscriber_port_map['veth306']=154
-g_subscriber_port_map[155]='veth308'
-g_subscriber_port_map['veth308']=155
-g_subscriber_port_map[156]='veth310'
-g_subscriber_port_map['veth310']=156
-g_subscriber_port_map[157]='veth312'
-g_subscriber_port_map['veth312']=157
-g_subscriber_port_map[158]='veth314'
-g_subscriber_port_map['veth314']=158
-g_subscriber_port_map[159]='veth316'
-g_subscriber_port_map['veth316']=159
-g_subscriber_port_map[160]='veth318'
-g_subscriber_port_map['veth318']=160
-g_subscriber_port_map[161]='veth320'
-g_subscriber_port_map['veth320']=161
-g_subscriber_port_map[162]='veth322'
-g_subscriber_port_map['veth322']=162
-g_subscriber_port_map[163]='veth324'
-g_subscriber_port_map['veth324']=163
-g_subscriber_port_map[164]='veth326'
-g_subscriber_port_map['veth326']=164
-g_subscriber_port_map[165]='veth328'
-g_subscriber_port_map['veth328']=165
-g_subscriber_port_map[166]='veth330'
-g_subscriber_port_map['veth330']=166
-g_subscriber_port_map[167]='veth332'
-g_subscriber_port_map['veth332']=167
-g_subscriber_port_map[168]='veth334'
-g_subscriber_port_map['veth334']=168
-g_subscriber_port_map[169]='veth336'
-g_subscriber_port_map['veth336']=169
-g_subscriber_port_map[170]='veth338'
-g_subscriber_port_map['veth338']=170
-g_subscriber_port_map[171]='veth340'
-g_subscriber_port_map['veth340']=171
-g_subscriber_port_map[172]='veth342'
-g_subscriber_port_map['veth342']=172
-g_subscriber_port_map[173]='veth344'
-g_subscriber_port_map['veth344']=173
-g_subscriber_port_map[174]='veth346'
-g_subscriber_port_map['veth346']=174
-g_subscriber_port_map[175]='veth348'
-g_subscriber_port_map['veth348']=175
-g_subscriber_port_map[176]='veth350'
-g_subscriber_port_map['veth350']=176
-g_subscriber_port_map[177]='veth352'
-g_subscriber_port_map['veth352']=177
-g_subscriber_port_map[178]='veth354'
-g_subscriber_port_map['veth354']=178
-g_subscriber_port_map[179]='veth356'
-g_subscriber_port_map['veth356']=179
-g_subscriber_port_map[180]='veth358'
-g_subscriber_port_map['veth358']=180
-g_subscriber_port_map[181]='veth360'
-g_subscriber_port_map['veth360']=181
-g_subscriber_port_map[182]='veth362'
-g_subscriber_port_map['veth362']=182
-g_subscriber_port_map[183]='veth364'
-g_subscriber_port_map['veth364']=183
-g_subscriber_port_map[184]='veth366'
-g_subscriber_port_map['veth366']=184
-g_subscriber_port_map[185]='veth368'
-g_subscriber_port_map['veth368']=185
-g_subscriber_port_map[186]='veth370'
-g_subscriber_port_map['veth370']=186
-g_subscriber_port_map[187]='veth372'
-g_subscriber_port_map['veth372']=187
-g_subscriber_port_map[188]='veth374'
-g_subscriber_port_map['veth374']=188
-g_subscriber_port_map[189]='veth376'
-g_subscriber_port_map['veth376']=189
-g_subscriber_port_map[190]='veth378'
-g_subscriber_port_map['veth378']=190
-g_subscriber_port_map[191]='veth380'
-g_subscriber_port_map['veth380']=191
-g_subscriber_port_map[192]='veth382'
-g_subscriber_port_map['veth382']=192
-g_subscriber_port_map[193]='veth384'
-g_subscriber_port_map['veth384']=193
-g_subscriber_port_map[194]='veth386'
-g_subscriber_port_map['veth386']=194
-g_subscriber_port_map[195]='veth388'
-g_subscriber_port_map['veth388']=195
-g_subscriber_port_map[196]='veth390'
-g_subscriber_port_map['veth390']=196
-g_subscriber_port_map[197]='veth392'
-g_subscriber_port_map['veth392']=197
-g_subscriber_port_map[198]='veth394'
-g_subscriber_port_map['veth394']=198
-g_subscriber_port_map[199]='veth396'
-g_subscriber_port_map['veth396']=199
-g_subscriber_port_map[200]='veth398'
-g_subscriber_port_map['veth398']=200
diff --git a/src/test/subscriber/subscriberDb.py b/src/test/subscriber/subscriberDb.py
deleted file mode 100644
index 4b9ba79..0000000
--- a/src/test/subscriber/subscriberDb.py
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import sqlite3
-import sys
-
-class SubscriberDB:
-
-    default_services = ('TLS', 'DHCP', 'IGMP')
-
-    def __init__(self, db = 'subscriber.db', create = False, services = default_services):
-        self.db = db
-        self.con = sqlite3.connect(db)
-        self.con.row_factory = sqlite3.Row
-        self.cur = self.con.cursor()
-        self.services = services
-        self.create = create
-        if create == True:
-            self.cur.execute("DROP TABLE IF EXISTS Subscriber")
-            self.cur.execute("CREATE TABLE Subscriber(Id INTEGER PRIMARY KEY, Name TEXT, Service TEXT);")
-
-    def load(self, name, service):
-        self.cur.execute("INSERT INTO Subscriber(Name, Service) VALUES (?, ?);", (name, service))
-
-    def commit(self):
-        self.con.commit()
-
-    def generate(self, num = 100):
-        #create db if not created
-        if self.create is False:
-            self.cur.execute("DROP TABLE IF EXISTS Subscriber")
-            self.cur.execute("CREATE TABLE Subscriber(Id INTEGER PRIMARY KEY, Name TEXT, Service TEXT);")
-            self.create = True
-        service = ' '.join(self.services)
-        for i in xrange(num):
-            name = "sub%d" %self.lastrowid()
-            self.load(name, service)
-        self.commit()
-
-    def read(self, num = 1000000, debug = False):
-        self.cur.execute("SELECT * FROM Subscriber LIMIT ?;", (num,))
-        rows = self.cur.fetchall()
-        if debug is True:
-            for row in rows:
-                print('Id %d, Name %s, Service %s' %(row['Id'], row['Name'], row['Service']))
-        return rows
-
-    def lastrowid(self):
-        return 0 if self.cur.lastrowid == None else self.cur.lastrowid
-
-if __name__ == "__main__":
-    create = False
-    if len(sys.argv) > 1:
-        try:
-            num_subscribers = int(sys.argv[1])
-        except:
-            num_subscribers = 100
-        print('Creating %d subscriber records' %num_subscribers)
-        create = True
-    sub = SubscriberDB(create = create)
-    if create == True:
-        sub.generate(num_subscribers)
-    else:
-        num_subscribers = 10
-    subscribers = sub.read(num_subscribers)
-    for s in subscribers:
-        print('Name %s, Service %s' %(s['Name'], s['Service']))
diff --git a/src/test/subscriber/subscriberTest.py b/src/test/subscriber/subscriberTest.py
deleted file mode 100644
index 6850468..0000000
--- a/src/test/subscriber/subscriberTest.py
+++ /dev/null
@@ -1,1296 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-import time, monotonic
-import os, sys
-import tempfile
-import random
-import threading
-from Stats import Stats
-from OnosCtrl import OnosCtrl
-from DHCP import DHCPTest
-from EapTLS import TLSAuthTest
-from Channels import Channels, IgmpChannel
-from subscriberDb import SubscriberDB
-from threadPool import ThreadPool
-from portmaps import g_subscriber_port_map
-from OltConfig import *
-from CordContainer import *
-from CordTestServer import cord_test_radius_restart
-from CordLogger import CordLogger
-from CordTestUtils import log_test as log
-import copy
-log.setLevel('INFO')
-DEFAULT_NO_CHANNELS = 1
-
-class Subscriber(Channels):
-      PORT_TX_DEFAULT = 2
-      PORT_RX_DEFAULT = 1
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      STATS_RX = 0
-      STATS_TX = 1
-      STATS_JOIN = 2
-      STATS_LEAVE = 3
-      SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
-
-
-      def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
-                   num = 1, channel_start = 0,
-                   tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
-                   iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
-                   mcast_cb = None, loginType = 'wireless'):
-            self.tx_port = tx_port
-            self.rx_port = rx_port
-            self.port_map = port_map or g_subscriber_port_map
-            try:
-                  self.tx_intf = self.port_map[tx_port]
-                  self.rx_intf = self.port_map[rx_port]
-            except:
-                  self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-                  self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-
-            Channels.__init__(self, num, channel_start = channel_start,
-                              iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
-            self.name = name
-            self.service = service
-            self.service_map = {}
-            services = self.service.strip().split(' ')
-            for s in services:
-                  self.service_map[s] = True
-            self.loginType = loginType
-            ##start streaming channels
-            self.join_map = {}
-            ##accumulated join recv stats
-            self.join_rx_stats = Stats()
-
-      def has_service(self, service):
-            if self.service_map.has_key(service):
-                  return self.service_map[service]
-            if self.service_map.has_key(service.upper()):
-                  return self.service_map[service.upper()]
-            return False
-
-      def channel_join_update(self, chan, join_time):
-            self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
-            self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
-
-      def channel_join(self, chan = 0, delay = 2):
-            '''Join a channel and create a send/recv stats map'''
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.delay = delay
-            chan, join_time = self.join(chan)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_join_next(self, delay = 2):
-            '''Joins the next channel leaving the last channel'''
-            if self.last_chan:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.join_next()
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_jump(self, delay = 2):
-            '''Jumps randomly to the next channel leaving the last channel'''
-            log.info("Jumps randomly to the next channel leaving the last channel")
-            if self.last_chan is not None:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.jump()
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_leave(self, chan = 0):
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.leave(chan)
-
-      def channel_update(self, chan, stats_type, packets, t=0):
-            if type(chan) == type(0):
-                  chan_list = (chan,)
-            else:
-                  chan_list = chan
-            for c in chan_list:
-                  if self.join_map.has_key(c):
-                        self.join_map[c][stats_type].update(packets = packets, t = t)
-
-      def channel_receive(self, chan, cb = None, count = 1):
-            log.info('Subscriber %s receiving from group %s, channel %d' %(self.name, self.gaddr(chan), chan))
-            self.recv(chan, cb = cb, count = count)
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            log.debug('Packet received for group %s, subscriber %s' %(pkt[IP].dst, self.name))
-            chan = self.caddr(pkt[IP].dst)
-            assert_equal(chan in self.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.join_map[chan][self.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.channel_update(chan, self.STATS_RX, 1, t = delta)
-            log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-
-class subscriber_pool:
-
-      def __init__(self, subscriber, test_cbs, test_status):
-            self.subscriber = subscriber
-            self.test_cbs = test_cbs
-            self.test_status = test_status
-
-      def pool_cb(self):
-            for cb in self.test_cbs:
-                  if cb:
-                        self.test_status = cb(self.subscriber)
-#                        cb(self.subscriber)
-                        if self.test_status is not True:
-                           log.info('This service is failed and other services will not run for this subscriber')
-                           break
-            log.info('This Subscriber is tested for multiple service elgibility ')
-            self.test_status = True
-
-class subscriber_exchange(CordLogger):
-
-      apps = [ 'org.opencord.aaa', 'org.onosproject.dhcp' ]
-
-      dhcp_app = 'org.onosproject.dhcp'
-
-      olt_apps = [ 'org.opencord.igmp', 'org.opencord.cordmcast' ]
-      dhcp_server_config = {
-        "ip": "10.1.11.50",
-        "mac": "ca:fe:ca:fe:ca:fe",
-        "subnet": "255.255.252.0",
-        "broadcast": "10.1.11.255",
-        "router": "10.1.8.1",
-        "domain": "8.8.8.8",
-        "ttl": "63",
-        "delay": "2",
-        "startip": "10.1.11.51",
-        "endip": "10.1.11.100"
-      }
-
-      aaa_loaded = False
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      SUBSCRIBER_TIMEOUT = 20
-
-      CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-      CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-
-      def setUp(self):
-          '''Load the OLT config and activate relevant apps'''
-          super(subscriber_exchange, self).setUp()
-          self.olt = OltConfig()
-          self.port_map, _ = self.olt.olt_port_map()
-          ##if no olt config, fall back to ovs port map
-          if not self.port_map:
-                self.port_map = g_subscriber_port_map
-          else:
-                log.info('Using OLT Port configuration for test setup')
-                log.info('Configuring CORD OLT access device information')
-                OnosCtrl.cord_olt_config(self.olt)
-                self.activate_apps(self.olt_apps)
-
-          self.activate_apps(self.apps)
-
-      def tearDown(self):
-          '''Deactivate the dhcp app'''
-          super(subscriber_exchange, self).tearDown()
-          for app in self.apps:
-              onos_ctrl = OnosCtrl(app)
-              onos_ctrl.deactivate()
-          log.info('Restarting the Radius container in the setup after running every subscriber test cases by default')
-          cord_test_radius_restart()
-          #os.system('ifconfig '+INTF_RX_DEFAULT+' up')
-
-      def activate_apps(self, apps):
-            for app in apps:
-                  onos_ctrl = OnosCtrl(app)
-                  status, _ = onos_ctrl.activate()
-                  assert_equal(status, True)
-                  time.sleep(2)
-
-      def onos_aaa_load(self):
-            if self.aaa_loaded:
-                  return
-            OnosCtrl.aaa_load_config()
-            self.aaa_loaded = True
-
-      def onos_dhcp_table_load(self, config = None):
-          dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
-          dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
-          if config:
-              for k in config.keys():
-                  if dhcp_config.has_key(k):
-                      dhcp_config[k] = config[k]
-          self.onos_load_config('org.onosproject.dhcp', dhcp_dict)
-
-      def send_recv(self, mac = None, update_seed = False, validate = True):
-          cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
-          if validate:
-             assert_not_equal(cip, None)
-             assert_not_equal(sip, None)
-             log.info('Got dhcp client IP %s from server %s for mac %s' %
-                     (cip, sip, self.dhcp.get_mac(cip)[0]))
-          return cip,sip
-
-      def onos_load_config(self, app, config):
-          status, code = OnosCtrl.config(config)
-          if status is False:
-             log.info('JSON config request for app %s returned status %d' %(app, code))
-             assert_equal(status, True)
-          time.sleep(2)
-
-      def dhcp_sndrcv(self, dhcp, update_seed = False):
-            cip, sip = dhcp.discover(update_seed = update_seed)
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-            log.info('Got dhcp client IP %s from server %s for mac %s' %
-                     (cip, sip, dhcp.get_mac(cip)[0]))
-            return cip,sip
-
-      def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
-            config = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
-                      'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                      'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-            self.onos_dhcp_table_load(config)
-            dhcp = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
-            cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed)
-            return cip, sip
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            chan = self.subscriber.caddr(pkt[IP].dst)
-            assert_equal(chan in self.subscriber.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
-            log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-            self.test_status = True
-
-      def tls_verify(self, subscriber):
-            if subscriber.has_service('TLS'):
-                  time.sleep(2)
-                  tls = TLSAuthTest()
-                  log.info('Running subscriber %s tls auth test' %subscriber.name)
-                  tls.runTest()
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_verify(self, subscriber):
-            cip, sip = self.dhcp_request(subscriber, update_seed = True)
-            log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-            subscriber.src_list = [cip]
-            self.test_status = True
-            return self.test_status
-
-      def dhcp_jump_verify(self, subscriber):
-          cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
-          log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-          subscriber.src_list = [cip]
-          self.test_status = True
-          return self.test_status
-
-      def dhcp_next_verify(self, subscriber):
-          cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
-          log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-          subscriber.src_list = [cip]
-          self.test_status = True
-          return self.test_status
-
-      def igmp_verify(self, subscriber):
-            chan = 0
-            if subscriber.has_service('IGMP'):
-                  for i in range(5):
-                        log.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_join(chan, delay = 0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_leave(chan)
-                        time.sleep(3)
-                        log.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_verify_multiChannel(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for chan in range(DEFAULT_NO_CHANNELS):
-                        log.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_join(chan, delay = 0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_leave(chan)
-                        time.sleep(3)
-                        log.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_jump_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        log.info('Subscriber %s jumping channel' %subscriber.name)
-                        chan = subscriber.channel_jump(delay=0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_next_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        if i:
-                              chan = subscriber.channel_join_next(delay=0)
-                        else:
-                              chan = subscriber.channel_join(i, delay=0)
-                        log.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-                        log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def generate_port_list(self, subscribers, channels):
-            port_list = []
-            for i in xrange(subscribers):
-                  if channels > 1:
-                        rx_port = 2*i+1
-                        tx_port = 2*i+2
-                  else:
-                        rx_port = Subscriber.PORT_RX_DEFAULT
-                        tx_port = Subscriber.PORT_TX_DEFAULT
-                  port_list.append((tx_port, rx_port))
-            return port_list
-
-      def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = []):
-            '''Load the subscriber from the database'''
-            self.subscriber_db = SubscriberDB(create = create)
-            if create is True:
-                  self.subscriber_db.generate(num)
-            self.subscriber_info = self.subscriber_db.read(num)
-            self.subscriber_list = []
-            if not port_list:
-                  port_list = self.generate_port_list(num, num_channels)
-
-            index = 0
-            for info in self.subscriber_info:
-                  self.subscriber_list.append(Subscriber(name=info['Name'],
-                                                         service=info['Service'],
-                                                         port_map = self.port_map,
-                                                         num=num_channels,
-                                                         channel_start = channel_start,
-                                                         tx_port = port_list[index][0],
-                                                         rx_port = port_list[index][1]))
-                  if num_channels > 1:
-                        channel_start += num_channels
-                  index += 1
-
-            #load the ssm list for all subscriber channels
-            igmpChannel = IgmpChannel()
-            ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
-            ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
-            igmpChannel.igmp_load_ssm_config(ssm_list)
-            #load the subscriber to mcast port map for cord
-            cord_port_map = {}
-            for sub in self.subscriber_list:
-                  for chan in sub.channels:
-                        cord_port_map[chan] = (sub.tx_port, sub.rx_port)
-
-            igmpChannel.cord_port_table_load(cord_port_map)
-
-      def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
-                                  channel_start = 0, cbs = None, port_list = [], negative_subscriber_auth = None):
-          self.test_status = False
-          self.num_subscribers = num_subscribers
-          self.sub_loop_count =  num_subscribers
-          self.subscriber_load(create = True, num = num_subscribers,
-                               num_channels = num_channels, channel_start = channel_start, port_list = port_list)
-          self.onos_aaa_load()
-          self.thread_pool = ThreadPool(min(100, self.num_subscribers), queue_size=1, wait_timeout=1)
-
-          if cbs and negative_subscriber_auth is None:
-                cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
-          cbs_negative = cbs
-          for subscriber in self.subscriber_list:
-                subscriber.start()
-                if negative_subscriber_auth is 'half' and self.sub_loop_count%2 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
-                elif negative_subscriber_auth is 'onethird' and self.sub_loop_count%3 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
-                else:
-                   cbs = cbs_negative
-                self.sub_loop_count = self.sub_loop_count - 1
-                pool_object = subscriber_pool(subscriber, cbs, self.test_status)
-                self.thread_pool.addTask(pool_object.pool_cb)
-          self.thread_pool.cleanUpThreads()
-          for subscriber in self.subscriber_list:
-                subscriber.stop()
-          print "self.test_status %s\n"%(self.test_status)
-          return self.test_status
-
-      def tls_invalid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-
-      def tls_no_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = '')
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-
-      def tls_self_signed_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-
-      def tls_Nsubscribers_use_same_valid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log.info('Running subscriber %s tls auth test' %subscriber.name)
-             num_users = 3
-             for i in xrange(num_users):
-                 tls = TLSAuthTest(intf = 'veth{}'.format(i*2))
-                 tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-
-      def dhcp_discover_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-             time.sleep(2)
-             log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-             t1 = self.subscriber_dhcp_1release()
-             self.test_status = True
-             return self.test_status
-
-      def subscriber_dhcp_1release(self, iface = INTF_RX_DEFAULT):
-             config = {'startip':'10.10.100.20', 'endip':'10.10.100.21',
-                       'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
-                       'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
-             self.onos_dhcp_table_load(config)
-             self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-             cip, sip = self.send_recv()
-             log.info('Releasing ip %s to server %s' %(cip, sip))
-             assert_equal(self.dhcp.release(cip), True)
-             log.info('Triggering DHCP discover again after release')
-             cip2, sip2 = self.send_recv(update_seed = True)
-             log.info('Verifying released IP was given back on rediscover')
-             assert_equal(cip, cip2)
-             log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-             assert_equal(self.dhcp.release(cip2), True)
-
-
-      def dhcp_client_reboot_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_request_after_reboot()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_client_request_after_reboot(self, iface = INTF_RX_DEFAULT):
-          #''' Client sends DHCP Request after reboot.'''
-
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log.info("Got DHCP server NAK.")
-                os.system('ifconfig '+iface+' down')
-                log.info('Client goes down.')
-                log.info('Delay for 5 seconds.')
-
-                time.sleep(5)
-
-                os.system('ifconfig '+iface+' up')
-                log.info('Client is up now.')
-
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                elif new_cip != None:
-                        log.info("Got DHCP ACK.")
-
-
-      def dhcp_client_renew_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_renew_time()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_client_renew_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-                if new_cip and new_sip and lval:
-                        log.info("Client 's Renewal time is :%s",lval)
-                        log.info("Generating delay till renewal time.")
-                        time.sleep(lval)
-                        log.info("Client Sending Unicast DHCP request.")
-                        latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-                        if latest_cip and latest_sip:
-                                log.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                (latest_cip, mac, latest_sip) )
-
-                        elif latest_cip == None:
-                                log.info("Got DHCP NAK. Lease not renewed.")
-                elif new_cip == None or new_sip == None or lval == None:
-                        log.info("Got DHCP NAK.")
-
-
-      def dhcp_server_reboot_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_server_after_reboot()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_server_after_reboot(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP server goes down.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                log.info('Getting DHCP server Down.')
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                onos_ctrl.deactivate()
-                for i in range(0,4):
-                        log.info("Sending DHCP Request.")
-                        log.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log.info('')
-                                log.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-                log.info('Getting DHCP server Up.')
-#               self.activate_apps(self.dhcp_app)
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                status, _ = onos_ctrl.activate()
-                assert_equal(status, True)
-                time.sleep(3)
-                for i in range(0,4):
-                        log.info("Sending DHCP Request after DHCP server is up.")
-                        log.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log.info('')
-                                log.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-
-      def dhcp_client_rebind_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_rebind_time()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_client_rebind_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-                if new_cip and new_sip and lval:
-                        log.info("Client 's Rebind time is :%s",lval)
-                        log.info("Generating delay till rebind time.")
-                        time.sleep(lval)
-                        log.info("Client Sending broadcast DHCP requests for renewing lease or for getting new ip.")
-                        self.dhcp.after_T2 = True
-                        for i in range(0,4):
-                                latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-                                if latest_cip and latest_sip:
-                                        log.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                        (latest_cip, mac, latest_sip) )
-                                        break
-                                elif latest_cip == None:
-                                        log.info("Got DHCP NAK. Lease not renewed.")
-                        assert_not_equal(latest_cip, None)
-                elif new_cip == None or new_sip == None or lval == None:
-                        log.info("Got DHCP NAK.Lease not Renewed.")
-
-      def dhcp_starvation_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_starvation()
-                  self.test_status = True
-                  return self.test_status
-
-
-      def subscriber_dhcp_starvation(self, iface = INTF_RX_DEFAULT):
-          '''DHCP starve'''
-          config = {'startip':'182.17.0.20', 'endip':'182.17.0.69',
-                    'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-          log.info('Verifying 1 ')
-          for x in xrange(50):
-              mac = RandMAC()._fix()
-              self.send_recv(mac = mac)
-          log.info('Verifying 2 ')
-          cip, sip = self.send_recv(update_seed = True, validate = False)
-          assert_equal(cip, None)
-          assert_equal(sip, None)
-
-      def dhcp_same_client_multi_discovers_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_same_client_multiple_discover()
-                  self.test_status = True
-                  return self.test_status
-
-
-      def subscriber_dhcp_same_client_multiple_discover(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple discover . '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-                  (cip, sip, mac) )
-          log.info('Triggering DHCP discover again.')
-          new_cip, new_sip, new_mac , lval = self.dhcp.only_discover()
-          if cip == new_cip:
-                 log.info('Got same ip for 2nd DHCP discover for client IP %s from server %s for mac %s. Triggering DHCP Request. '
-                          % (new_cip, new_sip, new_mac) )
-          elif cip != new_cip:
-                log.info('Ip after 1st discover %s' %cip)
-                log.info('Map after 2nd discover %s' %new_cip)
-                assert_equal(cip, new_cip)
-
-      def dhcp_same_client_multi_request_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_same_client_multiple_request()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_same_client_multiple_request(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple repeat DHCP requests. '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          log.info('Sending DHCP discover and DHCP request.')
-          cip, sip = self.send_recv()
-          mac = self.dhcp.get_mac(cip)[0]
-          log.info("Sending DHCP request again.")
-          new_cip, new_sip = self.dhcp.only_request(cip, mac)
-          if (new_cip,new_sip) == (cip,sip):
-                log.info('Got same ip for 2nd DHCP Request for client IP %s from server %s for mac %s.'
-                          % (new_cip, new_sip, mac) )
-          elif (new_cip,new_sip):
-                log.info('No DHCP ACK')
-                assert_equal(new_cip, None)
-                assert_equal(new_sip, None)
-          else:
-                print "Something went wrong."
-
-
-      def dhcp_client_desired_ip_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_desired_address()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_client_desired_address(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.31', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover(desired = True)
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-          elif cip != self.dhcp.seed_ip:
-                log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_equal(cip, self.dhcp.seed_ip)
-
-      def dhcp_client_request_pkt_with_non_offered_ip_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_server_nak_packet()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_server_nak_packet(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-                if new_cip == None:
-                        log.info("Got DHCP server NAK.")
-                        assert_equal(new_cip, None)  #Negative Test Case
-
-      def dhcp_client_requested_out_pool_ip_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_desired_address_out_of_pool()
-                  self.test_status = True
-                  return self.test_status
-
-
-      def subscriber_dhcp_client_desired_address_out_of_pool(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address from out of pool.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover(desired = True)
-          log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-                assert_equal(cip, self.dhcp.seed_ip) #Negative Test Case
-
-          elif cip != self.dhcp.seed_ip:
-                log.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_not_equal(cip, self.dhcp.seed_ip)
-
-          elif cip == None:
-                log.info('Got DHCP NAK')
-
-
-      def dhcp_client_specific_lease_scenario(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_specific_lease_packet()
-                  self.test_status = True
-                  return self.test_status
-
-      def subscriber_dhcp_specific_lease_packet(self, iface = INTF_RX_DEFAULT):
-          ''' Client sends DHCP Discover packet for particular lease time.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          log.info('Sending DHCP discover with lease time of 700')
-          cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True)
-
-          log.info("Verifying Client 's IP and mac in DHCP Offer packet.")
-          if (cip == None and mac != None):
-                log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif lval != 700:
-                log.info('Getting dhcp client IP %s from server %s for mac %s with lease time %s. That is not 700.' %
-                         (cip, sip, mac, lval) )
-                assert_not_equal(lval, 700)
-
-      def test_subscriber_join_recv_channel(self):
-          ###"""Test subscriber join and receive"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_jump_channel(self):
-          ###"""Test subscriber join and receive for channel surfing"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_next_channel(self):
-          ###"""Test subscriber join next for channels"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
-          assert_equal(test_status, True)
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      def test_subscriber_authentication_with_invalid_certificate_and_channel_surfing(self):
-          ### """Test subscriber to auth with invalidCertification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_invalid_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_invalid_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                                                  negative_subscriber_auth = 'all')
-              assert_equal(test_status, False)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_invalid_cert, df)
-          return df
-
-
-      #@deferred(SUBSCRIBER_TIMEOUT)
-      def test_subscriber_authentication_with_no_certificate_and_channel_surfing(self):
-          ### """Test subscriber to auth with No Certification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          df = defer.Deferred()
-          def sub_auth_no_cert(df):
-              test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                        num_channels = num_channels,
-                                                        cbs = (self.tls_no_cert, self.dhcp_verify, self.igmp_verify),
-                                                        port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                        negative_subscriber_auth = 'all')
-              assert_equal(test_status, False)
-              df.callback(0)
-          reactor.callLater(0, sub_auth_no_cert, df)
-          return df
-
-      def test_subscriber_authentication_with_self_signed_certificate_and_channel_surfing(self):
-          ### """Test subscriber to auth with Self Signed Certification and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                           num_channels = num_channels,
-                                           cbs = (self.tls_self_signed_cert, self.dhcp_verify, self.igmp_verify),
-                                           port_list = self.generate_port_list(num_subscribers, num_channels),
-                                           negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_discover_and_channel_surfing(self):
-          ### """Test subscriber auth success, DHCP re-discover with DHCP server and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                   num_channels = num_channels,
-                                                   cbs = (self.tls_verify, self.dhcp_discover_scenario, self.igmp_verify),
-                                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_client_reboot_scenario_and_channel_surfing(self):
-          ### """Test subscriber auth success, DHCP client got re-booted and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_reboot_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_server_reboot_scenario_and_channel_surfing(self):
-          ### """Test subscriber auth , DHCP server re-boot during DHCP process and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                              num_channels = num_channels,
-                                              cbs = (self.tls_verify, self.dhcp_server_reboot_scenario, self.igmp_verify),
-                                              port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_client_rebind_and_channel_surfing(self):
-          ### """Test subscriber auth , DHCP client rebind IP and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_client_rebind_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_subscriber_authentication_with_dhcp_starvation_scenario_and_channel_surfing(self):
-          ### """Test subscriber auth , DHCP starvation and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                num_channels = num_channels,
-                                                cbs = (self.tls_verify, self.dhcp_starvation_scenario, self.igmp_verify),
-                                                port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_multiple_dhcp_discover_for_same_subscriber_and_channel_surfing(self):
-          ### """Test subscriber auth , sending same DHCP client discover multiple times and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                   num_channels = num_channels,
-                                   cbs = (self.tls_verify, self.dhcp_same_client_multi_discovers_scenario, self.igmp_verify),
-                                   port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_multiple_dhcp_request_for_same_subscriber_and_channel_surfing(self):
-          ### """Test subscriber auth , same DHCP client multiple requerts times and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                     num_channels = num_channels,
-                                     cbs = (self.tls_verify, self.dhcp_same_client_multi_request_scenario, self.igmp_verify),
-                                     port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_client_requested_ip_and_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client requesting ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                     num_channels = num_channels,
-                                     cbs = (self.tls_verify, self.dhcp_client_desired_ip_scenario, self.igmp_verify),
-                                     port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_non_offered_ip_and_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client request for non-offered ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                        num_channels = num_channels,
-                        cbs = (self.tls_verify, self.dhcp_client_request_pkt_with_non_offered_ip_scenario, self.igmp_verify),
-                        port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_request_out_of_pool_ip_by_client_and_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client requesting out of pool ip and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_verify, self.dhcp_client_requested_out_pool_ip_scenario, self.igmp_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_authentication_with_dhcp_specified_lease_time_functionality_and_channel_surfing(self):
-          ### """Test subscriber auth with DHCP client specifying lease time and join channel"""
-          num_subscribers = 1
-          num_channels = 1
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                               num_channels = num_channels,
-                               cbs = (self.tls_verify, self.dhcp_client_specific_lease_scenario, self.igmp_verify),
-                               port_list = self.generate_port_list(num_subscribers, num_channels),                                                          negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_subscriber_join_recv_100channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_recv_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_recv_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_recv_1200channels(self):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_recv_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_jump_100channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_subscriber_join_jump_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_subscriber_join_jump_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_subscriber_join_jump_1200channel(sself):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-      def test_subscriber_join_jump_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_next_100channels(self):
-          num_subscribers = 1
-          num_channels = 100
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_next_400channels(self):
-          num_subscribers = 1
-          num_channels = 400
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_next_800channels(self):
-          num_subscribers = 1
-          num_channels = 800
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-
-      def test_subscriber_join_next_1200channels(self):
-          num_subscribers = 1
-          num_channels = 1200
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
-
-      def test_subscriber_join_next_1500channels(self):
-          num_subscribers = 1
-          num_channels = 1500
-          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers,
-                                                    num_channels = num_channels,
-                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
-                                                    port_list = self.generate_port_list(num_subscribers, num_channels),
-                                                    negative_subscriber_auth = 'all')
-          assert_equal(test_status, True)
diff --git a/src/test/tls/__init__.py b/src/test/tls/__init__.py
deleted file mode 100644
index 7f2419d..0000000
--- a/src/test/tls/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/tls/tlsTest.json b/src/test/tls/tlsTest.json
deleted file mode 100644
index 105156a..0000000
--- a/src/test/tls/tlsTest.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "VOLTHA_OLT_MAC": "00:0c:e2:31:12:00", 
-    "VOLTHA_HOST": "172.17.0.1", 
-    "VOLTHA_OLT_TYPE": "ponsim_olt", 
-    "VOLTHA_REST_PORT": 8882, 
-    "VOLTHA_TEARDOWN": false
-}
\ No newline at end of file
diff --git a/src/test/tls/tlsTest.py b/src/test/tls/tlsTest.py
deleted file mode 100644
index 8735f02..0000000
--- a/src/test/tls/tlsTest.py
+++ /dev/null
@@ -1,465 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import time
-import os
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from EapTLS import TLSAuthTest
-from OnosCtrl import OnosCtrl
-from CordLogger import CordLogger
-from CordTestUtils import log_test
-from CordTestConfig import setup_module, teardown_module
-from VolthaCtrl import VolthaCtrl
-from scapy.all import *
-from scapy_ssl_tls.ssl_tls import *
-from scapy_ssl_tls.ssl_tls_crypto import *
-log_test.setLevel('INFO')
-
-class eap_auth_exchange(CordLogger):
-
-    app = 'org.opencord.aaa'
-    TLS_TIMEOUT = 20
-    TEST_TIMEOUT = 3600
-    VOLTHA_HOST = None
-    VOLTHA_REST_PORT = VolthaCtrl.REST_PORT
-    VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-    VOLTHA_OLT_TYPE = 'simulated_olt'
-    VOLTHA_OLT_MAC = '00:0c:e2:31:12:00'
-    VOLTHA_UPLINK_VLAN_MAP = { 'of:0000000000000001' : '222' }
-    #this is from ca.pem file
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAN3OagiHm6AXMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECAwCQ0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UE
-CgwKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDDB1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNzAzMTEw
-MDQ3NDNaFw0yMjEwMzEwMDQ3NDNaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECAwC
-Q0ExEjAQBgNVBAcMCVNvbWV3aGVyZTETMBEGA1UECgwKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDDB1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBALYkVvncfeRel/apXy5iODla5H7sUpU7a+pwT7nephmjKDh0GPX/t5GUwgkB
-1zQAEj0IPoxZIfSAGSFP/mqTUK2sm7qerArih0E3kBRpnBKJZB/4r1OTZ04CsuRQ
-QJOqcI0mZJWUKEcahN4yZvRyxeiCeFFoc0Nw787MQHhD9lZTqJUoAvautUe1GCjG
-46DS4MzpWNGkqn5/ZC8lQ198AceMwf2pJRuOQg5cPwp65+dKNLUMLiSUV7JpvmAo
-of4MHtGaBxKHESZ2jPiNTT2uKI/7KxH3Pr/ctft3bcSX2d4q49B2tdEIRzC0ankm
-CrxFcq9Cb3MGaNuwWAtk3fOGKusCAwEAAaOCASwwggEoMB0GA1UdDgQWBBRtf8rH
-zJW7rliW1eZnbVbSb3obfDCBwAYDVR0jBIG4MIG1gBRtf8rHzJW7rliW1eZnbVbS
-b3obfKGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH
-DAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDdzmoIh5ugFzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAKWjORcBc1WK3r8mq88ipUC2UR1qvxdON4K/hd+rdAj0E/xA
-QCJDORKno8f2MktqLfhU0amCVBvwdfmVFmVDtl38b1pu+mNFO+FDp04039Fd5ThM
-iYmiQjnJ2IcAi/CILtrjURvJUPSOX9lviOtcla0HW94dgA9IDRs5frrWO9jkcxXR
-+oz3LNMfVnXqhoHHQ1RtvqOozhEsUZZWY5MuUxRY25peeZ7m1vz+zDa/DbrV1wsP
-dxOocmYdGFIAT9AiRnR4Jc/hqabBVNMZlGAA+2dELajpaHqb4yx5gBLVkT7VgHjI
-7cp7jLRL7T+i4orZiAXpeEpAeOrP8r0DYTJi/8A=
------END CERTIFICATE-----'''
-
-    invalid_cipher_suites = ['TLS_RSA_WITH_NULL_SHA256',
-                             'TLS_RSA_WITH_AES_128_CBC_SHA',
-                             'TLS_RSA_WITH_AES_128_CBC_SHA256',
-                             'TLS_RSA_WITH_AES_256_CBC_SHA256',
-                             'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256',
-                             'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256',
-                             'TLS_DH_anon_WITH_AES_128_CBC_SHA256',
-                             'TLS_DHE_DSS_WITH_AES_256_CBC_SHA256',
-                             'TLS_DHE_RSA_WITH_AES_256_CBC_SHA256',
-                             'TLS_DH_anon_WITH_AES_256_CBC_SHA256']
-
-
-    def setUp(self):
-        super(eap_auth_exchange, self).setUp()
-        self.onos_ctrl = OnosCtrl(self.app)
-        self.onos_aaa_config()
-
-    def onos_aaa_config(self):
-        OnosCtrl.aaa_load_config()
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls(self):
-        df = defer.Deferred()
-        def eap_tls_verify(df):
-            tls = TLSAuthTest()
-            tls.runTest()
-            df.callback(0)
-        reactor.callLater(0, eap_tls_verify, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_no_cert(self):
-        df = defer.Deferred()
-        def eap_tls_no_cert(df):
-            def tls_no_cert_cb():
-                log_test.info('TLS authentication failed with no certificate')
-            tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_no_cert, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_cert(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_cert(df):
-            def tls_invalid_cert_cb():
-                log_test.info('TLS authentication failed with invalid certificate')
-
-            tls = TLSAuthTest(fail_cb = tls_invalid_cert_cb,
-                              client_cert = self.CLIENT_CERT_INVALID)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_cert, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_Nusers_with_same_valid_cert(self):
-        df = defer.Deferred()
-        def eap_tls_Nusers_with_same_valid_cert(df):
-            num_users = 3
-            for i in xrange(num_users):
-                tls = TLSAuthTest(intf = 'veth{}'.format(i*2))
-                tls.runTest()
-            df.callback(0)
-        reactor.callLater(0, eap_tls_Nusers_with_same_valid_cert, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_session_id(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_session_id(df):
-            def tls_invalid_session_id_cb():
-                log_test.info('TLS authentication failed with invalid session  id')
-            tls = TLSAuthTest(fail_cb = tls_invalid_session_id_cb,session_id = 12345, session_id_length = 1)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_session_id, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_random_gmt_unix_time(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_gmt_unix_time(df):
-            def eap_tls_invalid_gmt_unix_time_cb():
-                log_test.info('TLS authentication failed with invalid gmt_unix_time in Client Hello Packet')
-            for i in [0,7265,98758,23627238]:
-                log_test.info("\nExecuting test case with gmt_unix_time value is set to %d"%i)
-                tls = TLSAuthTest(fail_cb = eap_tls_invalid_gmt_unix_time_cb, gmt_unix_time = i)
-                tls.runTest()
-                assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_gmt_unix_time, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_content_type(self,Positive_Test=True):
-        df = defer.Deferred()
-        def eap_tls_invalid_content_type(df):
-            def tls_invalid_content_type_cb():
-                log_test.info('TLS authentication failed with invalid content type in TLSContentType packet')
-            tls = TLSAuthTest(fail_cb = tls_invalid_content_type_cb, invalid_content_type = 24)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_content_type, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_record_fragment_length(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_record_fragment_length(df):
-            def eap_tls_invalid_record_fragment_length_cb():
-                log_test.info('TLS authentication failed with invalid fragment length field in TLSRecord packet')
-            tls = TLSAuthTest(fail_cb = eap_tls_invalid_record_fragment_length_cb, record_fragment_length = 17384)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_record_fragment_length, df)
-        return df
-
-    #invalid id field in identifier response packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_id_in_identifier_response_packet(self):
-        df = defer.Deferred()
-        def eap_tls_with_invalid_id_in_identifier_response_packet(df):
-            def tls_with_invalid_id_in_identifier_response_packet_cb():
-                log_test.info('TLS authentication failed with invalid id in identifier packet')
-            tls = TLSAuthTest(fail_cb = tls_with_invalid_id_in_identifier_response_packet_cb,
-                              id_mismatch_in_identifier_response_packet = True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_with_invalid_id_in_identifier_response_packet, df)
-        return df
-
-    #invalid id field in client hello packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_id_in_client_hello_packet(self):
-        df = defer.Deferred()
-        def eap_tls_with_invalid_id_in_client_hello_packet(df):
-            def tls_with_invalid_id_in_client_hello_packet_cb():
-                log_test.info('TLS authentication failed with invalid id in client hello packet')
-            tls = TLSAuthTest(fail_cb = tls_with_invalid_id_in_client_hello_packet_cb,
-                              id_mismatch_in_client_hello_packet = True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_with_invalid_id_in_client_hello_packet, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_without_sending_client_hello(self):
-        df = defer.Deferred()
-        def eap_tls_without_sending_client_hello(df):
-            def tls_without_sending_client_hello_cb():
-                log_test.info('TLS authentication failed with not sending client hello')
-            tls = TLSAuthTest(fail_cb = tls_without_sending_client_hello_cb,
-                              dont_send_client_hello = True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_without_sending_client_hello, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_aaa_app_deactivation(self):
-        df = defer.Deferred()
-        def eap_tls_aaa_app_deactivate(df):
-            def tls_aaa_app_deactivate_cb():
-                log_test.info('TLS authentication failed with aaa app deactivated in ONOS')
-            tls = TLSAuthTest(fail_cb = tls_aaa_app_deactivate_cb)
-            self.onos_ctrl.deactivate()
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-	    self.onos_ctrl.activate()
-            df.callback(0)
-        reactor.callLater(0, eap_tls_aaa_app_deactivate, df)
-        return df
-
-    #keeping cipher suite length as zero but including cipher suite key which is more than zero length in client hello packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_incorrect_cipher_suite_length_field(self):
-        df = defer.Deferred()
-        def eap_tls_incorrect_cipher_suite_length_field(df):
-            def tls_incorrect_cipher_suite_length_field_cb():
-                log_test.info('TLS authentication failed with incorrect cipher suite length field in client hello packet')
-            tls = TLSAuthTest(fail_cb = tls_incorrect_cipher_suite_length_field_cb, cipher_suites_length = 0)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_incorrect_cipher_suite_length_field, df)
-        return df
-
-    #keeping compression methods length to zero but sending compression method of more than 0 zero length in client hello packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_incorrect_compression_methods_length_field(self):
-        df = defer.Deferred()
-        def eap_tls_incorrect_compression_methods_length_field(df):
-            def tls_incorrect_compression_methods_length_field_cb():
-                log_test.info('TLS authentication failed with incorrect compression methods length field in client hello packet')
-            tls = TLSAuthTest(fail_cb = tls_incorrect_compression_methods_length_field_cb, compression_methods_length=1,compression_methods=TLSCompressionMethod.LZS)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_incorrect_compression_methods_length_field, df)
-        return df
-
-    #checking with broadcast source mac of EAPOL packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_source_mac_broadcast(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_source_mac_broadcast(df):
-            def tls_invalid_source_mac_broadcast_cb():
-                log_test.info('TLS authentication failed with invalid source mac as broadcast in EAPOL packet')
-            tls = TLSAuthTest(fail_cb = tls_invalid_source_mac_broadcast_cb, src_mac='bcast')
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_source_mac_broadcast, df)
-        return df
-
-    #checking with multicast source mac of EAPOL packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_source_mac_multicast(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_source_mac_multicast(df):
-            def tls_invalid_source_mac_multicast_cb():
-                log_test.info('TLS authentication failed with invalid source mac as multicast in EAPOL packet')
-            tls = TLSAuthTest(fail_cb = tls_invalid_source_mac_multicast_cb, src_mac='mcast')
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_source_mac_multicast, df)
-        return df
-
-    #checking with zero source mac of EAPOL packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_source_mac_zero(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_source_mac_zero(df):
-            def tls_invalid_source_mac_zero_cb():
-                log_test.info('TLS authentication failed with invalid source mac as zero in EAPOL packet')
-            tls = TLSAuthTest(fail_cb = tls_invalid_source_mac_zero_cb, src_mac='zeros')
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_source_mac_zero, df)
-        return df
-
-    #Restarting Radius server after sending client hello
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_restart_of_radius_server(self):
-        df = defer.Deferred()
-        def eap_tls_restart_radius_server(df):
-            def tls_restart_radius_server_cb():
-                log_test.info('TLS authentication failed with  radius server down in middle of authentication process')
-            tls = TLSAuthTest(fail_cb = tls_restart_radius_server_cb, restart_radius=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_restart_radius_server, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_incorrect_handshake_type_client_hello(self):
-        df = defer.Deferred()
-        def eap_tls_incorrect_handshake_type_client_hello(df):
-            def tls_incorrect_handshake_type_client_hello_cb():
-                log_test.info('TLS authentication failed with incorrect handshake type in client hello packet')
-            tls = TLSAuthTest(fail_cb = tls_incorrect_handshake_type_client_hello_cb, invalid_client_hello_handshake_type=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_incorrect_handshake_type_client_hello, df)
-        return df
-
-    #Sending certificate request type of handhsake instead of  certificate verify in client certificate request message
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_incorrect_handshake_type_certificate_request(self):
-        df = defer.Deferred()
-        def eap_tls_incorrect_handshake_type_certificate_request(df):
-            def tls_incorrect_handshake_type_certificate_request_cb():
-                log_test.info('TLS authentication failed with incorrect handshake type in client certificate request packet')
-            tls = TLSAuthTest(fail_cb = tls_incorrect_handshake_type_certificate_request_cb, invalid_cert_req_handshake=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_incorrect_handshake_type_certificate_request, df)
-        return df
-
-    #Sending tls record content type as 'ALERT' instead of 'HANDSHAKE' in certificate request packet
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_incorrect_tlsrecord_certificate_request(self):
-        df = defer.Deferred()
-        def eap_tls_incorrect_tlsrecord_certificate_request(df):
-            def tls_incorrect_tlsrecord_certificate_request_cb():
-                log_test.info('TLS authentication failed with incorrect tlsrecord type  in certificate request packet')
-            tls = TLSAuthTest(fail_cb = tls_incorrect_tlsrecord_certificate_request_cb, incorrect_tlsrecord_type_cert_req=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_incorrect_tlsrecord_certificate_request, df)
-        return df
-
-    #Sending client hello with zero lenght field in Handshake protocol
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_with_invalid_handshake_length_client_hello(self):
-        df = defer.Deferred()
-        def eap_tls_invalid_handshake_length_client_hello(df):
-            def tls_invalid_handshake_length_client_hello_cb():
-                log_test.info('TLS authentication failed with invalid handshake length in client hello packet')
-            tls = TLSAuthTest(fail_cb = tls_invalid_handshake_length_client_hello_cb, invalid_client_hello_handshake_length=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_invalid_handshake_length_client_hello, df)
-        return df
-
-    @deferred(TLS_TIMEOUT)
-    def test_eap_tls_clientkeyex_replace_with_serverkeyex(self):
-        df = defer.Deferred()
-        def eap_tls_clientkeyex_replace_with_serverkeyex(df):
-            def tls_clientkeyex_replace_with_serverkeyex_cb():
-                log_test.info('TLS authentication failed with client key exchange replaced with server key exchange')
-            tls = TLSAuthTest(fail_cb = tls_clientkeyex_replace_with_serverkeyex_cb,clientkeyex_replace_with_serverkeyex=True)
-            tls.runTest()
-            assert_equal(tls.failTest, True)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_clientkeyex_replace_with_serverkeyex, df)
-        return df
-
-    #simulating authentication for multiple users, 1K in this test case
-    @deferred(TEST_TIMEOUT)
-    def test_eap_tls_1k_sessions_with_diff_mac(self):
-        df = defer.Deferred()
-        def eap_tls_1k_with_diff_mac(df):
-            for i in xrange(1000):
-                tls = TLSAuthTest(src_mac = 'random')
-                tls.runTest()
-		log_test.info('Authentication successfull for user %d'%i)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_1k_with_diff_mac, df)
-        return df
-
-    #simulating authentication for multiple users, 5K in this test case
-    @deferred(TEST_TIMEOUT+1800)
-    def test_eap_tls_5k_sessions_with_diff_mac(self):
-        df = defer.Deferred()
-        def eap_tls_5k_with_diff_mac(df):
-            for i in xrange(5000):
-                tls = TLSAuthTest(src_mac = 'random')
-                tls.runTest()
-                log_test.info('Authentication successfull for user %d'%i)
-            df.callback(0)
-        reactor.callLater(0, eap_tls_5k_with_diff_mac, df)
-        return df
-
-    def tls_scale(self, num_sessions):
-        '''Called from scale test'''
-        def tls_session_fail_cb():
-            pass
-        for i in xrange(num_sessions):
-            tls = TLSAuthTest(src_mac = 'random', fail_cb = tls_session_fail_cb)
-            tls.runTest()
-            if tls.failTest is False:
-                log_test.info('Authentication successful for user %d'%i)
-            else:
-                log_test.info('Authentication failed for user %d' %i)
-
-if __name__ == '__main__':
-    t = TLSAuthTest()
-    t.runTest()
diff --git a/src/test/utils/ACL.py b/src/test/utils/ACL.py
deleted file mode 100644
index dc3ce91..0000000
--- a/src/test/utils/ACL.py
+++ /dev/null
@@ -1,158 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import json
-import requests
-import os,sys,time
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from scapy.all import *
-from CordTestUtils import get_mac, get_controller, log_test
-from OnosCtrl import OnosCtrl
-from OnosFlowCtrl import OnosFlowCtrl
-log_test.setLevel('INFO')
-
-conf.verb = 0 # Disable Scapy verbosity
-conf.checkIPaddr = 0 # Don't check response packets for matching destination IPs
-
-class ACLTest:
-
-    auth = ('karaf', 'karaf')
-    controller = get_controller()
-    add_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
-    remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(controller, id)
-    clear_all_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
-    iface_create_onos_url = 'http://%s:8181/onos/v1/network/configuration' %(controller)
-    device_id = 'of:' + get_mac()
-    MAX_PORTS = 100
-
-    def __init__(self, ipv4Prefix ='v4', srcIp ='null', dstIp ='null', ipProto = 'null', dstTpPort = 0, action = 'null', ingress_iface = 1, egress_iface = 2,iface_num = 0, iface_name = 'null', iface_count = 0, iface_ip = 'null'):
-        self.ipv4Prefix = ipv4Prefix
-        self.srcIp = srcIp
-        self.ingress_iface = ingress_iface
-        self.egress_iface = egress_iface
-        self.dstIp = dstIp
-        self.ipProto = ipProto
-        self.dstTpPort = dstTpPort
-        self.action = action
-        self.iface_count = iface_count
-        self.iface_num = iface_num
-        self.iface_name = iface_name
-        self.iface_ip = iface_ip
-        self.device_id = OnosCtrl.get_device_id()
-
-    def adding_acl_rule(self, ipv4Prefix, srcIp, dstIp, ipProto ='null', dstTpPort='null', action= 'include',controller=None):
-        '''This function is generating ACL json file and post to ONOS for creating a ACL rule'''
-        if ipv4Prefix is 'v4':
-           acl_dict = {}
-           if srcIp and dstIp and action:
-              acl_dict['srcIp'] = '{}'.format(srcIp)
-              acl_dict['dstIp'] = '{}'.format(dstIp)
-              acl_dict['action'] = '{}'.format(action)
-           if ipProto is not 'null':
-              acl_dict['ipProto'] = '{}'.format(ipProto)
-           if dstTpPort is not 'null':
-              acl_dict['dstTpPort'] = '{}'.format(dstTpPort)
-        json_data = json.dumps(acl_dict)
-	if controller is None:
-	    # if controller  ip is not passed, it will default controller ip
-            resp = requests.post(self.add_acl_rule_url, auth = self.auth, data = json_data)
-	else:
-	    add_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
-	    log_test.info('add_acl_rule_acl url is %s'%add_acl_rule_url)
-            resp = requests.post(add_acl_rule_url, auth = self.auth, data = json_data)
-        return resp.ok, resp.status_code
-
-    def get_acl_rules(self,controller=None):
-        '''This function is getting a ACL rules from ONOS with json formate'''
-	if controller is None:
-            resp = requests.get(self.add_acl_rule_url, auth = self.auth)
-	else:
-	    add_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
-	    log_test.info('get_acl_rule_url is %s'%add_acl_rule_url)
-	    resp = requests.get(add_acl_rule_url, auth = self.auth)
-        return resp
-
-    @classmethod
-    def remove_acl_rule(cls,id = None,controller=None):
-        '''This function is delete one or all  ACL rules in ONOS'''
-        if id is None:
-	    if controller is None:
-                remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(cls.controller)
-	    else:
-		remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules' %(controller)
-        else:
-	    if controller is None:
-                remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(cls.controller, id)
-	    else:
-		remove_acl_rule_url = 'http://%s:8181/onos/v1/acl/rules/%s' %(controller, id)
-	log_test.info('remove_acl_rule_url is %s'%remove_acl_rule_url)
-        resp = requests.delete(remove_acl_rule_url, auth = cls.auth)
-        return resp.ok, resp.status_code
-
-    def generate_onos_interface_config(self,iface_num = 4, iface_name = 'null',iface_count = 1,iface_ip = '198.162.10.1'):
-        '''This function is generate interface config data in json format and post to ONOS for creating it '''
-        ''' To add interfaces on ONOS to test acl with trffic'''
-        num = 0
-        egress_host_list = []
-        interface_list = []
-        ip = iface_ip.split('/')[0]
-        start_iface_ip = ip.split('.')
-        start_ip = ( int(start_iface_ip[0]) << 24) | ( int(start_iface_ip[1]) << 16)  |  ( int(start_iface_ip[2]) << 8) | 0
-        end_ip =  ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        ports_dict = { 'ports' : {} }
-        for n in xrange(start_ip, end_ip, 256):
-            port_map = ports_dict['ports']
-            port = iface_num if num < self.MAX_PORTS - 1 else self.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(self.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 2
-            peer_ip = n + 1
-            ips = '%d.%d.%d.%d/%d'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff, int(iface_ip.split('/')[1]))
-            peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
-            mac = RandMAC()._fix()
-            egress_host_list.append((peer, mac))
-            if num < self.MAX_PORTS - 1:
-               interface_dict = { 'name' : '{0}-{1}'.format(iface_name,port), 'ips': [ips], 'mac' : mac }
-               interfaces.append(interface_dict)
-               interface_list.append(interface_dict['name'])
-            else:
-               interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == iface_count:
-               break
-        json_data = json.dumps(ports_dict)
-        resp = requests.post(self.iface_create_onos_url, auth = self.auth, data = json_data)
-        return resp.ok, resp.status_code, egress_host_list
diff --git a/src/test/utils/Channels.py b/src/test/utils/Channels.py
deleted file mode 100644
index 6c4346c..0000000
--- a/src/test/utils/Channels.py
+++ /dev/null
@@ -1,356 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import threading
-import sys
-import os
-import time
-import monotonic
-import random
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from scapy.all import *
-from McastTraffic import *
-from IGMP import *
-from OnosCtrl import OnosCtrl
-from CordTestUtils import log_test
-from nose.tools import *
-log_test.setLevel('DEBUG')
-
-conf.verb = 0
-
-class IgmpChannel:
-
-    IGMP_DST_MAC = "01:00:5e:00:01:01"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.1.1'
-    igmp_eth = Ether(dst = IGMP_DST_MAC, src = IGMP_SRC_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST, src = IP_SRC)
-    ssm_list = []
-
-    def __init__(self, iface = 'veth0', ssm_list = [], src_list = None, delay = 2,controller=None):
-
-	self.controller=controller
-        self.iface = iface
-        self.ssm_list += ssm_list
-        if src_list is None:
-           self.src_list = ['1.2.3.4']
-        else:
-            self.src_list = src_list
-        self.delay = delay
-        self.onos_ctrl = OnosCtrl('org.opencord.igmp',controller=self.controller)
-        self.onos_ctrl.activate()
-
-    def igmp_load_ssm_config(self, ssm_list = [], src_list = None):
-        if src_list is None:
-            src_list = self.src_list
-        if not ssm_list:
-            ssm_list = self.ssm_list
-        self.ssm_table_load(ssm_list, src_list = src_list)
-
-    def igmp_join(self, groups, src_list = None, record_type = None):
-        if src_list is None:
-            src_list = self.src_list
-        if record_type is None:
-           record_type = IGMP_V3_GR_TYPE_INCLUDE
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr='224.0.1.1')
-        for g in groups:
-              gr = IGMPv3gr(rtype=record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-
-        pkt = self.igmp_eth/self.igmp_ip/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface=self.iface)
-        if self.delay != 0:
-            time.sleep(self.delay)
-
-    def igmp_leave(self, groups, src_list = None):
-        if src_list is None:
-            src_list = self.src_list
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr='224.0.1.1')
-        for g in groups:
-              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-
-        pkt = self.igmp_eth/self.igmp_ip/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface = self.iface)
-        if self.delay != 0:
-            time.sleep(self.delay)
-
-    def onos_load_config(self, config):
-        status, code = OnosCtrl.config(config,controller=self.controller)
-        if status is False:
-            log_test.info('JSON config request returned status %d' %code)
-        time.sleep(2)
-
-    def ssm_table_load(self, groups, src_list = None):
-          return
-          if src_list is None:
-              src_list = self.src_list
-          ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
-          for g in groups:
-                for s in src_list:
-                      d = {}
-                      d['source'] = s
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-          self.onos_load_config(ssm_dict)
-
-    def cord_port_table_load(self, cord_port_map):
-          return
-          cord_group_dict = {'apps' : { 'org.ciena.cordigmp' : { 'cordIgmpTranslate' : [] } } }
-          cord_group_xlate_list = cord_group_dict['apps']['org.ciena.cordigmp']['cordIgmpTranslate']
-          for group, ports in cord_port_map.items():
-              d = {}
-              d['group'] = group
-              d['inputPort'] = ports[0]
-              d['outputPort'] = ports[1]
-              cord_group_xlate_list.append(d)
-          self.onos_load_config(cord_group_dict)
-
-class Channels(IgmpChannel):
-    Stopped = 0
-    Started = 1
-    Idle = 0
-    Joined = 1
-    def __init__(self, num, channel_start = 0, iface = 'veth0', iface_mcast = 'veth2', mcast_cb = None, src_list = None):
-        self.num = num
-        self.channel_start = channel_start
-        self.channels = self.generate(self.num, self.channel_start)
-        self.group_channel_map = {}
-        #assert_equal(len(self.channels), self.num)
-        for i in range(self.num):
-            self.group_channel_map[self.channels[i]] = i
-        self.state = self.Stopped
-        self.streams = None
-        self.channel_states = {}
-        self.last_chan = None
-        self.iface_mcast = iface_mcast
-        self.mcast_cb = mcast_cb
-        self.src_list = src_list
-        self.streams_list = []
-        for c in range(self.num):
-            self.channel_states[c] = [self.Idle]
-        IgmpChannel.__init__(self, ssm_list = self.channels, iface=iface, src_list = src_list)
-
-    def generate(self, num, channel_start = 0):
-        start = (225 << 24) | ( ( (channel_start >> 16) & 0xff) << 16 ) | \
-            ( ( (channel_start >> 8) & 0xff ) << 8 ) | (channel_start) & 0xff
-        start += channel_start/256 + 1
-        end = start + num
-        group_addrs = []
-        count = 0
-        while count != num:
-            for i in range(start, end):
-                if i&255:
-                    g = '%s.%s.%s.%s' %((i>>24) &0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
-                    log_test.debug('Adding group %s' %g)
-                    group_addrs.append(g)
-                    count += 1
-            start = end
-            end = start + 1
-        return group_addrs
-
-    def start(self):
-        if self.state == self.Stopped:
-            if self.streams:
-                self.streams.stop()
-            if self.streams_list:
-               for i in range(len(self.streams_list)):
-                  self.streams_list[i].stop()
-            if self.src_list:
-               for i in range(len(self.src_list)):
-                  self.streams_list.append(McastTraffic(self.channels, iface=self.iface_mcast, cb = self.mcast_cb, src_ip = self.src_list[i]))
-                  self.streams_list[i].start()
-#               self.streams = McastTraffic(self.channels, iface=self.iface_mcast, cb = self.mcast_cb)
-#               self.streams.start()
-
-            else:
-                self.streams = McastTraffic(self.channels, iface=self.iface_mcast, cb = self.mcast_cb)
-                self.streams.start()
-            self.state = self.Started
-
-    def join(self, chan = None, src_list = None, record_type = None):
-    #def join(self, chan = None):
-        if chan is None:
-            chan = random.randint(0, self.num)
-        else:
-            if chan >= self.num:
-                chan = 0
-
-        if self.get_state(chan) == self.Joined:
-            return chan, 0
-        groups = [self.channels[chan]]
-        join_start = monotonic.monotonic()
-        self.igmp_join(groups, src_list = src_list, record_type = record_type)
-        self.set_state(chan, self.Joined)
-        self.last_chan = chan
-        return chan, join_start
-
-    def leave(self, chan, force = False, src_list = None):
-        if chan is None:
-            chan = self.last_chan
-        if chan is None or chan >= self.num:
-            return False
-        if force is False and self.get_state(chan) != self.Joined:
-            return False
-        groups = [self.channels[chan]]
-        self.igmp_leave(groups, src_list = src_list)
-        self.set_state(chan, self.Idle)
-        if chan == self.last_chan:
-            self.last_chan = None
-        return True
-
-    def join_next(self, chan = None, src_list = None, leave_flag = True):
-        if chan is None and self.last_chan is not None:
-            chan = self.last_chan
-            if chan is None:
-                return None
-            leave = chan
-            join  = chan+1
-        else:
-            leave = chan - 1
-            join = chan
-
-        if join >= self.num:
-            join = 0
-
-        if leave >= 0 and leave != join:
-            if leave_flag is True:
-                self.leave(leave, src_list = src_list)
-
-        return self.join(join, src_list = src_list)
-
-    def jump(self):
-        chan = self.last_chan
-        if chan is not None:
-            self.leave(chan)
-            s_next = chan
-        else:
-            s_next = 0
-        if self.num - s_next < 2:
-            s_next = 0
-        chan = random.randint(s_next, self.num)
-        return self.join(chan)
-
-    def gaddr(self, chan):
-        '''Return the group address for a channel'''
-        if chan >= self.num:
-            return None
-        return self.channels[chan]
-
-    def caddr(self, group):
-        '''Return a channel given a group addr'''
-        if self.group_channel_map.has_key(group):
-            return self.group_channel_map[group]
-        return None
-
-    def recv_cb(self, pkt, src_list = None):
-        '''Default channel receive callback'''
-        log_test.debug('Received packet from source %s, destination %s' %(pkt[IP].src, pkt[IP].dst))
-        if src_list is None:
-           send_time = float(pkt[IP].payload.load)
-           recv_time = monotonic.monotonic()
-           log_test.debug('Packet received in %.3f usecs' %(recv_time - send_time))
-        elif(pkt[IP].src == src_list[0]):
-           log_test.debug('Received packet from specified source %s, destination %s' %(pkt[IP].src, pkt[IP].dst))
-        elif(pkt[IP].src != src_list[0]):
-           log_test.debug('Received packet not from specified source %s, destination %s' %(pkt[IP].src, pkt[IP].dst))
-           time.sleep(60)
-
-    def recv(self, chan, cb = None, count = 1, timeout = 5, src_list = None):
-        if chan is None:
-            return None
-        if type(chan) == type([]) or type(chan) == type(()):
-            channel_list=filter(lambda c: c < self.num, chan)
-            groups = map(lambda c: self.gaddr(c), channel_list)
-        else:
-            groups = (self.gaddr(chan),)
-        if cb is None:
-            cb = self.recv_cb(src_list = src_list)
-        return sniff(prn = cb, count=count, timeout = timeout,
-                     lfilter = lambda p: IP in p and p[IP].dst in groups, iface = bytes(self.iface[:15]))
-
-    def not_recv(self, chan, cb = None, count = 1, timeout = 5, src_list = None):
-        if chan is None:
-            return None
-        if type(chan) == type([]) or type(chan) == type(()):
-            channel_list=filter(lambda c: c < self.num, chan)
-            groups = map(lambda c: self.gaddr(c), channel_list)
-        else:
-            groups = (self.gaddr(chan),)
-
-        if cb is None:
-            cb = self.recv_cb(src_list = src_list)
-        return sniff(prn = cb, count=count, timeout = timeout,
-                     lfilter = lambda p: IP in p and p[IP].dst in groups and p[IP].src in src_list, iface = bytes(self.iface[:15]))
-
-    def stop(self):
-        if self.streams:
-            self.streams.stop()
-        if self.streams_list:
-           for i in range(len(self.streams_list)):
-               self.streams_list[i].stop()
-        self.state = self.Stopped
-
-    def get_state(self, chan):
-        abc = self.channel_states[chan][0]
-        return abc
-
-    def set_state(self, chan, state):
-        self.channel_states[chan][0] = state
-
-if __name__ == '__main__':
-    num = 5
-    start = 0
-    ssm_list = []
-    src_list = [ '1.2.3.4' ]
-    for i in xrange(2):
-        channels = Channels(num, start, src_list = src_list)
-        ssm_list += channels.channels
-        start += num
-    igmpChannel = IgmpChannel(src_list = src_list)
-    igmpChannel.igmp_load_ssm_config(ssm_list, src_list)
-    channels.start()
-    for i in range(num):
-        channels.join(i)
-    for i in range(num):
-        channels.recv(i)
-    for i in range(num):
-        channels.leave(i)
-    channels.stop()
diff --git a/src/test/utils/Cluster.py b/src/test/utils/Cluster.py
deleted file mode 100644
index 4f762ea..0000000
--- a/src/test/utils/Cluster.py
+++ /dev/null
@@ -1,2560 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import unittest
-from twisted.internet import defer
-from nose.tools import *
-from IGMP import *
-from ACL import ACLTest
-from DHCP import DHCPTest
-from Channels import Channels, IgmpChannel
-from subscriberDb import SubscriberDB
-import time, monotonic
-from CordTestUtils import get_mac, log_test
-from OltConfig import OltConfig
-from OnosCtrl import OnosCtrl
-from OnosFlowCtrl import OnosFlowCtrl
-from CordContainer import Container, Onos, Quagga
-from onosclidriver import OnosCliDriver
-from CordTestServer import CordTestServer, cord_test_onos_restart, cord_test_quagga_restart,cord_test_quagga_stop, cord_test_quagga_shell,cord_test_shell
-from portmaps import g_subscriber_port_map
-import time, monotonic
-from scapy_ssl_tls.ssl_tls import *
-from scapy_ssl_tls.ssl_tls_crypto import *
-import os
-import json
-from socket import socket
-import pexpect
-from Stats import Stats
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from scapy.all import *
-from OnosFlowCtrl import OnosFlowCtrl
-from nose.twistedtools import reactor, deferred
-import tempfile
-import threading
-from threading import current_thread
-from threadPool import ThreadPool
-import random
-import collections
-import requests
-log_test.setLevel('INFO')
-class cluster_igmp(object):
-    V_INF1 = 'veth0'
-    V_INF2 = 'veth1'
-    MGROUP1 = '239.1.2.3'
-    MGROUP2 = '239.2.2.3'
-    MINVALIDGROUP1 = '255.255.255.255'
-    MINVALIDGROUP2 = '239.255.255.255'
-    MMACGROUP1 = "01:00:5e:01:02:03"
-    MMACGROUP2 = "01:00:5e:02:02:03"
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-    IP_SRC = '1.2.3.4'
-    IP_DST = '224.0.0.22'
-    NEGATIVE_TRAFFIC_STATUS = 1
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-    IGMP_TEST_TIMEOUT = 5
-    IGMP_QUERY_TIMEOUT = 60
-    MCAST_TRAFFIC_TIMEOUT = 10
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    max_packets = 100
-    app = 'org.opencord.igmp'
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
-    ROVER_TEST_TIMEOUT = 300 #3600*86
-    ROVER_TIMEOUT = (ROVER_TEST_TIMEOUT - 100)
-    ROVER_JOIN_TIMEOUT = 60
-
-    @classmethod
-    def setUpClass(cls,controller=None):
-          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-          cls.port_map, _ = cls.olt.olt_port_map()
-          OnosCtrl.cord_olt_config(cls.olt, controller=controller)
-
-    @classmethod
-    def tearDownClass(cls): pass
-
-    def setUp(self,controller=None):
-	self.setUpClass(controller=controller)
-	self.get_igmp_intf()
-        ''' Activate the igmp app'''
-        self.onos_ctrl = OnosCtrl(self.app,controller=controller)
-        self.onos_ctrl.activate()
-        self.igmp_channel = IgmpChannel(controller=controller)
-
-    def tearDown(self): pass
-
-    def onos_load_config(self, config,controller=None):
-        log_test.info('onos load config is %s'%config)
-        status, code = OnosCtrl(self.app).config(config,controller=controller)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-    def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],controller=None,flag = False):
-          ssm_dict = {'apps' : { 'org.onosproject.igmp' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.onosproject.igmp']['ssmTranslate']
-          if flag: #to maintain seperate group-source pair.
-              for i in range(len(groups)):
-                  d = {}
-                  d['source'] = src_list[i] or '0.0.0.0'
-                  d['group'] = groups[i]
-                  ssm_xlate_list.append(d)
-          else:
-              for g in groups:
-                  for s in src_list:
-                      d = {}
-                      d['source'] = s or '0.0.0.0'
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-          self.onos_load_config(ssm_dict,controller=controller)
-          cord_port_map = {}
-          for g in groups:
-                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
-          self.igmp_channel.cord_port_table_load(cord_port_map)
-          time.sleep(2)
-
-    def mcast_ip_range(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def random_mcast_ip(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def source_ip_range(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-    def iptomac(self, mcast_ip):
-        mcast_mac =  '01:00:5e:'
-        octets = mcast_ip.split('.')
-        second_oct = int(octets[1]) & 127
-        third_oct = int(octets[2])
-        fourth_oct = int(octets[3])
-        mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
-        return mcast_mac
-
-    def randomsourceip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def get_igmp_intf(self):
-        inst = os.getenv('TEST_INSTANCE', None)
-        if not inst:
-            return 'veth0'
-        inst = int(inst) + 1
-        if inst >= self.port_map['uplink']:
-            inst += 1
-        if self.port_map.has_key(inst):
-              return self.port_map[inst]
-        return 'veth0'
-
-    def igmp_verify_join(self, igmpStateList):
-        sendState, recvState = igmpStateList
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            tx = tx_stats.count
-            assert_greater(tx, 0)
-            rx_stats = recvState.group_map[g][1]
-            rx = rx_stats.count
-            assert_greater(rx, 0)
-            log_test.info('Receive stats %s for group %s' %(rx_stats, g))
-
-        log_test.info('IGMP test verification success')
-
-    def igmp_verify_leave(self, igmpStateList, leave_groups):
-        sendState, recvState = igmpStateList[0], igmpStateList[1]
-        ## check if the send is received for the groups
-        for g in sendState.groups:
-            tx_stats = sendState.group_map[g][0]
-            rx_stats = recvState.group_map[g][1]
-            tx = tx_stats.count
-            rx = rx_stats.count
-            assert_greater(tx, 0)
-            if g not in leave_groups:
-                log_test.info('Received %d packets for group %s' %(rx, g))
-        for g in leave_groups:
-            rx = recvState.group_map[g][1].count
-            assert_equal(rx, 0)
-
-        log_test.info('IGMP test verification success')
-
-    def mcast_traffic_timer(self):
-          self.mcastTraffic.stopReceives()
-
-    def send_mcast_cb(self, send_state):
-        for g in send_state.groups:
-            send_state.update(g, tx = 1)
-        return 0
-
-    ##Runs in the context of twisted reactor thread
-    def igmp_recv(self, igmpState, iface = 'veth0'):
-        p = self.recv_socket.recv()
-        try:
-              send_time = float(p.payload.load)
-              recv_time = monotonic.monotonic()
-        except:
-              log_test.info('Unexpected Payload received: %s' %p.payload.load)
-              return 0
-        #log_test.info( 'Recv in %.6f secs' %(recv_time - send_time))
-        igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
-        return 0
-
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        sendp(pkt, iface=iface)
-        log_test.info('igmp join packet is %s'%pkt.show())
-        if delay != 0:
-            time.sleep(delay)
-
-class cluster_tls():
-    eap_app = 'org.opencord.aaa'
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIEyTCCA7GgAwIBAgIJAM6l2jUG56pLMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
-VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
-ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
-JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjAzMTEx
-ODUzMzVaFw0xNzAzMDYxODUzMzVaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
-Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
-CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
-cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAL9Jv54TkqycL3U2Fdd/y5NXdnPVXwAVV3m6I3eIffVCv8eS+mwlbl9dnbjo
-qqlGEgA3sEg5HtnKoW81l3PSyV/YaqzUzbcpDlgWlbNkFQ3nVxh61gSU34Fc4h/W
-plSvCkwGSbV5udLtEe6S9IflP2Fu/eXa9vmUtoPqDk66p9U/nWVf2H1GJy7XanWg
-wke+HpQvbzoSfPJS0e5Rm9KErrzaIkJpqt7soW+OjVJitUax7h45RYY1HHHlbMQ0
-ndWW8UDsCxFQO6d7nsijCzY69Y8HarH4mbVtqhg3KJevxD9UMRy6gdtPMDZLah1c
-LHRu14ucOK4aF8oICOgtcD06auUCAwEAAaOCASwwggEoMB0GA1UdDgQWBBQwEs0m
-c8HARTVp21wtiwgav5biqjCBwAYDVR0jBIG4MIG1gBQwEs0mc8HARTVp21wtiwga
-v5biqqGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
-EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
-D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
-dXRob3JpdHmCCQDOpdo1BueqSzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
-oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
-hvcNAQELBQADggEBAK+fyAFO8CbH35P5mOX+5wf7+AeC+5pwaFcoCV0zlfwniANp
-jISgcIX9rcetLxeYRAO5com3+qLdd9dGVNL0kwufH4QhlSPErG7OLHHAs4JWVhUo
-bH3lK9lgFVlnCDBtQhslzqScR64SCicWcQEjv3ZMZsJwYLvl8unSaKz4+LVPeJ2L
-opCpmZw/V/S2NhBbe3QjTiRPmDev2gbaO4GCfi/6sCDU7UO3o8KryrkeeMIiFIej
-gfwn9fovmpeqCEyupy2JNNUTJibEuFknwx7JAX+htPL27nEgwV1FYtwI3qLiZqkM
-729wo9cFSslJNZBu+GsBP5LszQSuvNTDWytV+qY=
------END CERTIFICATE-----'''
-    def __init__(self):
-	pass
-    def setUp(self,controller=None):
-        self.onos_ctrl = OnosCtrl(self.eap_app,controller=controller)
-        self.onos_aaa_config(controller=controller)
-
-    def onos_aaa_config(self,controller=None):
-	log_test.info('controller in onos_aaa_config is %s'%controller)
-        aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
-                                                                'radiusIp': '172.17.0.2' } } } }
-        radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
-        aaa_dict['apps']['org.opencord.aaa']['AAA']['radiusIp'] = radius_ip
-        self.onos_ctrl.activate()
-        time.sleep(2)
-        self.onos_load_config(aaa_dict,controller=controller)
-
-    def onos_load_config(self, config,controller=None):
-	log_test.info('controller in onos_load_config is %s'%controller)
-        log_test.info('onos load config is %s'%config)
-        status, code = OnosCtrl(self.eap_app).config(config,controller=controller)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-
-class cluster_flows():
-
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    INTF_TX_DEFAULT = 'veth2'
-    INTF_RX_DEFAULT = 'veth0'
-    default_port_map = {
-        PORT_TX_DEFAULT : INTF_TX_DEFAULT,
-        PORT_RX_DEFAULT : INTF_RX_DEFAULT,
-        INTF_TX_DEFAULT : PORT_TX_DEFAULT,
-        INTF_RX_DEFAULT : PORT_RX_DEFAULT
-        }
-    app = 'org.onosproject.cli'
-
-    def incmac(self, mac):
-        tmp =  str(hex(int('0x'+mac,16)+1).split('x')[1])
-        mac = '0'+ tmp if len(tmp) < 2 else tmp
-        return mac
-
-    def next_mac(self, mac):
-        mac = mac.split(":")
-        mac[5] = self.incmac(mac[5])
-
-        if len(mac[5]) > 2:
-           mac[0] = self.incmac(mac[0])
-           mac[5] = '01'
-
-        if len(mac[0]) > 2:
-           mac[0] = '01'
-           mac[1] = self.incmac(mac[1])
-           mac[5] = '01'
-        return ':'.join(mac)
-
-    def to_egress_mac(cls, mac):
-        mac = mac.split(":")
-        mac[4] = '01'
-
-        return ':'.join(mac)
-
-    def inc_ip(self, ip, i):
-
-        ip[i] =str(int(ip[i])+1)
-        return '.'.join(ip)
-
-
-    def next_ip(self, ip):
-
-        lst = ip.split('.')
-        for i in (3,0,-1):
-            if int(lst[i]) < 255:
-               return self.inc_ip(lst, i)
-            elif int(lst[i]) == 255:
-               lst[i] = '0'
-               if int(lst[i-1]) < 255:
-                  return self.inc_ip(lst,i-1)
-               elif int(lst[i-2]) < 255:
-                  lst[i-1] = '0'
-                  return self.inc_ip(lst,i-2)
-               else:
-                  break
-
-    def randomip(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def ip_range(self,start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return ip_range
-
-    def to_egress_ip(self, ip):
-        lst=ip.split('.')
-        lst[0] = '182'
-        return '.'.join(lst)
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = cls.default_port_map
-        cls.device_id = OnosCtrl.get_device_id()
-
-class cluster_proxyarp():
-    #apps = ('org.onosproject.vrouter','org.onosproject.proxyarp')
-    app = 'org.onosproject.proxyarp'
-    device_id = 'of:' + get_mac()
-    device_dict = { "devices" : {
-                "{}".format(device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    MAX_PORTS = 100
-    hosts_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
-
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json'))
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-	print('port map in proxyarp setUpClass is %s'%cls.port_map)
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        cls.load_device_id()
-
-    @classmethod
-    def load_device_id(cls):
-        did = OnosCtrl.get_device_id()
-        cls.device_id = did
-        cls.device_dict = { "devices" : {
-                "{}".format(did) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-            },
-        }
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def interface_config_load(cls, interface_cfg = None):
-        if type(interface_cfg) is tuple:
-            res = []
-            for v in interface_cfg:
-                if type(v) == list:
-                    pass
-                else:
-                    res += v.items()
-                    config = dict(res)
-        else:
-            config = interface_cfg
-        cfg = json.dumps(config)
-        #with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
-        #    f.write(cfg)
-        #return cord_test_onos_restart(config=config)
-
-    @classmethod
-    def host_config_load(cls, host_config = None, controller=None):
-        for host in host_config:
-            status, code = OnosCtrl(cls.app).host_config(host,onos_ip=controller)
-            if status is False:
-                log_test.info('JSON request returned status %d' %code)
-                assert_equal(status, True)
-
-    @classmethod
-    def generate_interface_config(cls, hosts = 1):
-        num = 0
-        start_host = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
-        end_host =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        hosts_list = []
-        for n in xrange(start_host, end_host, 256):
-            port_map = ports_dict['ports']
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 1
-            host_ip = n + 2
-            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
-            host = '%d.%d.%d.%d' % ( (host_ip >> 24) & 0xff, ( ( host_ip >> 16) & 0xff ), ( (host_ip >> 8 ) & 0xff ), host_ip & 0xff )
-            mac = RandMAC()._fix()
-            hosts_list.append((host, mac))
-            if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == hosts:
-                break
-        cls.hosts_list = hosts_list
-        return (cls.device_dict, ports_dict, hosts_list)
-
-    @classmethod
-    def generate_host_config(cls):
-        num = 0
-        hosts_dict = {}
-        for host, mac in cls.hosts_list:
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': port}}
-            num += 1
-        return hosts_dict.values()
-
-    @classmethod
-    def proxyarp_activate(cls, deactivate = False,controller=None):
-        app = 'org.onosproject.proxyarp'
-        onos_ctrl = OnosCtrl(app,controller=controller)
-        if deactivate is True:
-            onos_ctrl.deactivate()
-        else:
-            onos_ctrl.activate()
-        time.sleep(3)
-
-    @classmethod
-    def proxyarp_config(cls, hosts = 1,controller=None):
-        proxyarp_configs = cls.generate_interface_config(hosts = hosts)
-        cls.interface_config_load(interface_cfg = proxyarp_configs)
-        hostcfg = cls.generate_host_config()
-        cls.host_config_load(host_config = hostcfg,controller=controller)
-        return proxyarp_configs
-
-    def proxyarp_arpreply_verify(self, ingress, hostip, hostmac, PositiveTest=True):
-        #log_test.info('verifying arp reply for host ip %s host mac %s on interface %s'%(hostip ,hostmac ,self.port_map[ingress]))
-        self.success = False
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
-                self.success = True if PositiveTest == True else False
-            sniff(count=1, timeout=2, lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
-                  prn = recv_cb, iface = self.port_map[ingress])
-        t = threading.Thread(target = recv_task)
-        t.start()
-        pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst=hostip))
-        log_test.info('sending arp request  for dest ip %s on interface %s' %
-                 (hostip, self.port_map[ingress]))
-        sendp( pkt, count = 10, iface = self.port_map[ingress])
-        t.join()
-        if PositiveTest:
-            assert_equal(self.success, True)
-        else:
-            assert_equal(self.success, False)
-
-    def proxyarp_hosts_verify(self, hosts = 1,PositiveTest = True):
-	log_test.info('verifying arp reply for host ip host mac on interface %s'%(self.port_map[2]))
-        _,_,hosts_config = self.proxyarp_config(hosts = hosts)
-        log_test.info('\nhosts_config %s and its type %s'%(hosts_config,type(hosts_config)))
-        self.cliEnter()
-        connected_hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log_test.info('Discovered hosts: %s' %connected_hosts)
-        #We read from cli if we expect less number of routes to avoid cli timeouts
-        if hosts <= 10000:
-            assert_equal(len(connected_hosts), hosts)
-        ingress = hosts+1
-        for hostip, hostmac in hosts_config:
-                self.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = PositiveTest)
-                time.sleep(1)
-        self.cliExit()
-        return True
-
-class cluster_vrouter(object):
-    apps = ('org.onosproject.vrouter', 'org.onosproject.fwd')
-    device_id = 'of:' + get_mac()
-    vrouter_device_dict = { "devices" : {
-                "{}".format(device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-    zebra_conf = '''
-password zebra
-log stdout
-service advanced-vty
-!
-!debug zebra rib
-!debug zebra kernel
-!debug zebra fpm
-!
-!interface eth1
-! ip address 10.10.0.3/16
-line vty
- exec-timeout 0 0
-'''
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    quagga_config_path = os.path.join(test_path, '..', 'setup/quagga-config')
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    MAX_PORTS = 100
-    peer_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
-    network_list = []
-    network_mask = 24
-    default_routes_address = ('11.10.10.0/24',)
-    default_peer_address = peer_list
-    quagga_ip = os.getenv('QUAGGA_IP')
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the vrouter apps'''
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        cls.load_device_id()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the vrouter apps'''
-        #cls.vrouter_host_unload()
-        cls.start_onos(network_cfg = {})
-	#cls.vrouter_activate(cls, deactivate = True)
-
-
-    @classmethod
-    def load_device_id(cls):
-        did = OnosCtrl.get_device_id()
-        cls.device_id = did
-        cls.vrouter_device_dict = { "devices" : {
-                "{}".format(did) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-            },
-        }
-
-    def cliEnter(self,controller=None):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True,controller=controller)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def onos_load_config(cls, config,controller=None):
-        status, code = OnosCtrl.config(config,controller=controller)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    @classmethod
-    def vrouter_config_get(cls, networks = 4, peers = 1, peer_address = None,
-                           route_update = None, router_address = None):
-        vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers,
-                                                    peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    @classmethod
-    def vrouter_host_load(cls,peer_address = None,controller=None):
-        index = 1
-        hosts_dict = {}
-	peer_info = peer_address if peer_address is not None else cls.peer_list
-        for host,_ in peer_info:
-	    #iface = cls.port_map[index]
-	    mac = RandMAC()._fix()
-            #port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-	    log_test.info('creating host with ip %s and mac %s'%(host,mac))
-            hosts_dict[host] = {'mac':mac, 'vlan':'none', 'ipAddresses':[host], 'location':{ 'elementId' : '{}'.format(cls.device_id), 'port': index}}
-	    index += 1
-	for host in hosts_dict.values():
-            status, code = OnosCtrl.host_config(host,onos_ip=controller)
-            if status is False:
-                log_test.info('JSON request returned status %d' %code)
-                return False
-        return True
-
-    """@classmethod
-    def vrouter_host_load(cls, peer_address = None):
-	#cls.setUpClass()
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            log_test.info('Assigning ip %s to interface %s' %(host, iface))
-            config_cmds = ( 'ifconfig {} 0'.format(iface),
-                            'ifconfig {0} {1}'.format(iface, host),
-                            'arping -I {0} {1} -c 2'.format(iface, host),
-                            )
-            for cmd in config_cmds:
-                os.system(cmd)
-    """
-    @classmethod
-    def vrouter_host_unload(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            config_cmds = ('ifconfig {} 0'.format(iface), )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def start_onos(cls, network_cfg = None):
-        if type(network_cfg) is tuple:
-            res = []
-            for v in network_cfg:
-                res += v.items()
-            config = dict(res)
-        else:
-            config = network_cfg
-	cfg = json.dumps(config)
-        log_test.info('Restarting ONOS with new network configuration %s'%config)
-        #return cord_test_onos_restart(config = config)
-	with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
-            f.write(cfg)
-        return cord_test_onos_restart(config=config)
-
-    @classmethod
-    def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
-        log_test.info('Restarting Quagga container with configuration for %d networks' %(networks))
-        config = cls.generate_conf(networks = networks, peer_address = peer_address, router_address = router_address)
-        if networks <= 10000:
-            boot_delay = 25
-        else:
-            delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
-            n = min(networks/100000, len(delay_map)-1)
-            boot_delay = delay_map[n]
-        cord_test_quagga_restart(config = config, boot_delay = boot_delay)
-
-    @classmethod
-    def generate_vrouter_conf(cls, networks = 4, peers = 1, peer_address = None, router_address = None):
-        num = 0
-        if peer_address is None:
-           start_peer = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
-           end_peer =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        else:
-           ip = peer_address[0][0]
-           start_ip = ip.split('.')
-           start_peer = ( int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_peer =   ((int(start_ip[0]) + 8) << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        local_network = end_peer + 1
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        peer_list = []
-        for n in xrange(start_peer, end_peer, 256):
-            port_map = ports_dict['ports']
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 2
-            peer_ip = n + 1
-            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
-            peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
-            mac = RandMAC()._fix()
-            peer_list.append((peer, mac))
-            if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == peers:
-                break
-        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
-        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
-        quagga_router_dict['ospfEnabled'] = True
-        quagga_router_dict['interfaces'] = interface_list
-        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-
-        #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
-        bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
-        speaker_dict = {}
-        speaker_dict['name'] = 'bgp{}'.format(peers+1)
-        speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-        speaker_dict['peers'] = peer_list
-        bgp_speakers_list.append(speaker_dict)
-        cls.peer_list = peer_list
-        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
-
-    @classmethod
-    def generate_conf(cls, networks = 4, peer_address = None, router_address = None):
-        num = 0
-        if router_address is None:
-            start_network = ( 11 << 24) | ( 10 << 16) | ( 10 << 8) | 0
-            end_network =   ( 172 << 24 ) | ( 0 << 16)  | (0 << 8) | 0
-            network_mask = 24
-        else:
-           ip = router_address
-           start_ip = ip.split('.')
-           network_mask = int(start_ip[3].split('/')[1])
-           start_ip[3] = (start_ip[3].split('/'))[0]
-           start_network = (int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_network = (172 << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        net_list = []
-        peer_list = peer_address if peer_address is not None else cls.peer_list
-        network_list = []
-        for n in xrange(start_network, end_network, 256):
-            net = '%d.%d.%d.0'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
-            network_list.append(net)
-            gateway = peer_list[num % len(peer_list)][0]
-            net_route = 'ip route {0}/{1} {2}'.format(net, network_mask, gateway)
-            net_list.append(net_route)
-            num += 1
-            if num == networks:
-                break
-        cls.network_list = network_list
-        cls.network_mask = network_mask
-        zebra_routes = '\n'.join(net_list)
-        #log_test.info('Zebra routes: \n:%s\n' %cls.zebra_conf + zebra_routes)
-        return cls.zebra_conf + zebra_routes
-
-    @classmethod
-    def vrouter_activate(cls, deactivate = False,controller=None):
-        app = 'org.onosproject.vrouter'
-        onos_ctrl = OnosCtrl(app,controller=controller)
-        if deactivate is True:
-            onos_ctrl.deactivate()
-        else:
-            onos_ctrl.activate()
-        time.sleep(3)
-
-    @classmethod
-    def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,
-                          route_update = None, router_address = None, time_expire = None, adding_new_routes = None,controller=None):
-        vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,
-                                                 peer_address = peer_address, route_update = route_update)
-        cls.start_onos(network_cfg = vrouter_configs)
-        cls.vrouter_host_load(controller=controller)
-        ##Start quagga
-        cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
-                  prn = recv_cb, iface = self.port_map[ingress])
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = src_mac, dst = dst_mac)
-        L3 = IP(src = src_ip, dst = dst_ip)
-        pkt = L2/L3
-        log_test.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
-                 (dst_ip, dst_mac, self.port_map[egress]))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = self.port_map[egress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def vrouter_traffic_verify(self, positive_test = True, peer_address = None):
-        if peer_address is None:
-            peers = len(self.peer_list)
-            peer_list = self.peer_list
-        else:
-            peers = len(peer_address)
-            peer_list = peer_address
-        egress = peers + 1
-        num = 0
-        num_hosts = 5 if positive_test else 1
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        if self.network_mask != 24:
-            peers = 1
-        for network in self.network_list:
-            num_ips = num_hosts
-            octets = network.split('.')
-            for i in xrange(num_ips):
-                octets[-1] = str(int(octets[-1]) + 1)
-                dst_ip = '.'.join(octets)
-                dst_mac = peer_list[ num % peers ] [1]
-                port = (num % peers)
-                ingress = port + 1
-                #Since peers are on the same network
-                ##Verify if flows are setup by sending traffic across
-                self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
-            num += 1
-
-    def vrouter_network_verify(self, networks, peers = 1, positive_test = True,
-                                 start_network = None, start_peer_address = None, route_update = None,
-                                 invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
-                                 deactivate_activate_vrouter = None, adding_new_routes = None,controller=None):
-
-        _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
-                                                          peer_address = start_peer_address,
-                                                          route_update = route_update,
-                                                          router_address = start_network,
-                                                          time_expire = time_expire,
-                                                          adding_new_routes = adding_new_routes,
-							  controller=controller)
-        self.cliEnter(controller=controller)
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log_test.info('Discovered hosts: %s' %hosts)
-        ##We read from cli if we expect less number of routes to avoid cli timeouts
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            #log_test.info('Routes: %s' %routes)
-            if start_network is not None:
-               if start_network.split('/')[1] is 24:
-                  assert_equal(len(routes['routes4']), networks)
-               if start_network.split('/')[1] is not 24:
-                  assert_equal(len(routes['routes4']), 1)
-            if start_network is None and invalid_peers is None:
-               assert_equal(len(routes['routes4']), networks)
-            if invalid_peers is not None:
-               assert_equal(len(routes['routes4']), 0)
-            flows = json.loads(self.cli.flows(jsonFormat = True))
-            flows = filter(lambda f: f['flows'], flows)
-            #log_test.info('Flows: %s' %flows)
-            assert_not_equal(len(flows), 0)
-        if invalid_peers is None:
-            self.vrouter_traffic_verify()
-        if positive_test is False:
-            self.vrouter_network_verify_negative(networks, peers = peers)
-        if time_expire is True:
-            self.start_quagga(networks = networks, peer_address = start_peer_address, router_address = '12.10.10.1/24')
-            self.vrouter_traffic_verify()
-        if unreachable_route_traffic is True:
-            network_list_backup = self.network_list
-            self.network_list = ['2.2.2.2','3.3.3.3','4.4.4.4','5.5.5.5']
-            self.vrouter_traffic_verify(positive_test = False)
-            self.network_list = network_list_backup
-        if deactivate_activate_vrouter is True:
-            log_test.info('Deactivating vrouter app in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = True)
-            #routes = json.loads(self.cli.routes(jsonFormat = False, cmd_exist = False))
-            #assert_equal(len(routes['routes4']), 'Command not found')
-            log_test.info('Activating vrouter app again in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = False)
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            assert_equal(len(routes['routes4']), networks)
-            self.vrouter_traffic_verify()
-        self.cliExit()
-	return True
-
-    def vrouter_network_verify_negative(self, networks, peers = 1):
-        ##Stop quagga. Test traffic again to see if flows were removed
-        log_test.info('Stopping Quagga container')
-        cord_test_quagga_stop()
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            #Verify routes have been removed
-            if routes and routes.has_key('routes4'):
-                assert_equal(len(routes['routes4']), 0)
-        self.vrouter_traffic_verify(positive_test = False)
-        log_test.info('OVS flows have been removed successfully after Quagga was stopped')
-        self.start_quagga(networks = networks)
-        ##Verify the flows again after restarting quagga back
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            assert_equal(len(routes['routes4']), networks)
-        self.vrouter_traffic_verify()
-        log_test.info('OVS flows have been successfully reinstalled after Quagga was restarted')
-
-    def quagga_shell(self, cmd):
-        shell_cmds = ('vtysh', '"conf t"', '"{}"'.format(cmd))
-        quagga_cmd = ' -c '.join(shell_cmds)
-
-        return cord_test_quagga_shell(quagga_cmd)
-
-class cluster_acl(object):
-    app = 'org.onosproject.acl'
-    device_id = 'of:' + get_mac('ovsbr0')
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    ingress_iface = 1
-    egress_iface = 2
-    MAX_PORTS = 100
-    CURRENT_PORT_NUM = egress_iface
-    ACL_SRC_IP = '192.168.20.3/32'
-    ACL_DST_IP = '192.168.30.2/32'
-    ACL_SRC_IP_RULE_2 = '192.168.40.3/32'
-    ACL_DST_IP_RULE_2 = '192.168.50.2/32'
-    ACL_SRC_IP_PREFIX_24 = '192.168.20.3/24'
-    ACL_DST_IP_PREFIX_24 = '192.168.30.2/24'
-    HOST_DST_IP = '192.168.30.0/24'
-    HOST_DST_IP_RULE_2 = '192.168.50.0/24'
-
-    @classmethod
-    def setUpClass(cls):
-        cls.olt = OltConfig()
-        cls.port_map,_ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        log_test.info('port_map = %s'%cls.port_map[1] )
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the acl app'''
-    def setUp(self,controller=None):
-	self.setUpClass()
-        ''' Activate the acl app'''
-        self.maxDiff = None ##for assert_equal compare outputs on failure
-        self.onos_ctrl = OnosCtrl(self.app,controller=controller)
-        status, _ = self.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(1)
-        #status, _ = ACLTest.remove_acl_rule()
-        #log_test.info('Start setup')
-        #assert_equal(status, True)
-
-    def tearDown(self):
-        '''Deactivate the acl app'''
-        log_test.info('Tear down setup')
-        self.CURRENT_PORT_NUM = 4
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def acl_hosts_add(cls, dstHostIpMac, egress_iface_count = 1,  egress_iface_num = None):
-	cls.setUpClass()
-        index = 0
-        if egress_iface_num is None:
-            egress_iface_num = cls.egress_iface
-        for ip,_ in dstHostIpMac:
-            egress = cls.port_map[egress_iface_num]
-            log_test.info('Assigning ip %s to interface %s' %(ip, egress))
-            config_cmds_egress = ( 'ifconfig {} 0'.format(egress),
-                                   'ifconfig {0} up'.format(egress),
-                                   'ifconfig {0} {1}'.format(egress, ip),
-                                   'arping -I {0} {1} -c 2'.format(egress, ip.split('/')[0]),
-                                   'ifconfig {0}'.format(egress),
-                                 )
-            for cmd in config_cmds_egress:
-                os.system(cmd)
-            index += 1
-            if index == egress_iface_count:
-               break
-            egress_iface_count += 1
-            egress_iface_num += 1
-
-
-    @classmethod
-    def acl_hosts_remove(cls, egress_iface_count = 1,  egress_iface_num = None):
-	cls.setUpClass()
-        if egress_iface_num is None:
-           egress_iface_num = cls.egress_iface
-        n = 0
-        for n in range(egress_iface_count):
-           egress = cls.port_map[egress_iface_num]
-           config_cmds_egress = ('ifconfig {} 0'.format(egress))
-           os.system(config_cmds_egress)
-           egress_iface_num += 1
-
-    def acl_rule_traffic_send_recv(self, srcMac, dstMac, srcIp, dstIp, ingress =None, egress=None, ip_proto=None, dstPortNum = None, positive_test = True):
-	self.setUpClass()
-        if ingress is None:
-           ingress = self.ingress_iface
-        if egress is None:
-           egress = self.egress_iface
-        ingress = self.port_map[ingress]
-        egress = self.port_map[egress]
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dstIp.split('/')[0] and p[IP].src == srcIp.split('/')[0],
-                  prn = recv_cb, iface = egress)
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = srcMac, dst = dstMac)
-        L3 = IP(src = srcIp.split('/')[0], dst = dstIp.split('/')[0])
-        pkt = L2/L3
-        log_test.info('Sending a packet with dst ip %s, src ip %s , dst mac %s src mac %s on port %s to verify if flows are correct' %
-                 (dstIp.split('/')[0], srcIp.split('/')[0], dstMac, srcMac, ingress))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = ingress)
-        t.join()
-        assert_equal(self.success, True)
-
-    @classmethod
-    def onos_load_config(cls, config,controller=None):
-        status, code = OnosCtrl.config(config,controller=controller)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-class cluster_dhcprelay(object):
-    app = 'org.onosproject.dhcprelay'
-    app_dhcp = 'org.onosproject.dhcp'
-    relay_interfaces_last = ()
-    interface_to_mac_map = {}
-    host_ip_map = {}
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    dhcp_data_dir = os.path.join(test_path, '..', 'setup')
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
-    default_options = [ ('subnet-mask', '255.255.255.0'),
-                     ('broadcast-address', '192.168.1.255'),
-                     ('domain-name-servers', '192.168.1.1'),
-                     ('domain-name', '"mydomain.cord-tester"'),
-                   ]
-    ##specify the IP for the dhcp interface matching the subnet and subnet config
-    ##this is done for each interface dhcpd server would be listening on
-    default_subnet_config = [ ('192.168.1.2',
-'''
-subnet 192.168.1.0 netmask 255.255.255.0 {
-    range 192.168.1.10 192.168.1.100;
-}
-'''), ]
-
-    lock = threading.Condition()
-    ip_count = 0
-    failure_count = 0
-    start_time = 0
-    diff = 0
-
-    transaction_count = 0
-    transactions = 0
-    running_time = 0
-    total_success = 0
-    total_failure = 0
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-
-    @classmethod
-    def setUpClass(cls,controller=None):
-	log_test.info('controller ip in dhcp setup def is %s'%controller)
-        ''' Activate the dhcprelay app'''
-        OnosCtrl(cls.app_dhcp,controller=controller).deactivate()
-        time.sleep(3)
-        cls.onos_ctrl = OnosCtrl(cls.app,controller=controller)
-        status, _ = cls.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(3)
-        cls.dhcp_relay_setup(controller=controller)
-        ##start dhcpd initially with default config
-        cls.dhcpd_start(controller=controller)
-
-    @classmethod
-    def tearDownClass(cls,controller=None):
-        '''Deactivate the dhcp relay app'''
-        try:
-            os.unlink('{}/dhcpd.conf'.format(cls.dhcp_data_dir))
-            os.unlink('{}/dhcpd.leases'.format(cls.dhcp_data_dir))
-        except: pass
-	OnosCtrl(cls.app,controller=controller).deactivate()
-        #cls.onos_ctrl.deactivate()
-        cls.dhcpd_stop()
-        #cls.dhcp_relay_cleanup()
-
-    @classmethod
-    def dhcp_relay_setup(cls,controller=None):
-        did = OnosCtrl.get_device_id()
-        cls.relay_device_id = did
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.port_map:
-            ##Per subscriber, we use 1 relay port
-            try:
-                relay_port = cls.port_map[cls.port_map['relay_ports'][0]]
-            except:
-                relay_port = cls.port_map['uplink']
-            cls.relay_interface_port = relay_port
-            cls.relay_interfaces = (cls.port_map[cls.relay_interface_port],)
-        else:
-            cls.relay_interface_port = 100
-            cls.relay_interfaces = (g_subscriber_port_map[cls.relay_interface_port],)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        if cls.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in cls.port_map['ports']:
-                port_num = cls.port_map[port]
-                if port_num == cls.port_map['uplink']:
-                    continue
-                ip = cls.get_host_ip(port_num)
-                mac = cls.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure dhcp server virtual interface on the same subnet as first client interface
-            relay_ip = cls.get_host_ip(interface_list[0][0])
-            relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
-            interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
-            cls.onos_interface_load(interface_list,controller=controller)
-
-    @classmethod
-    def dhcp_relay_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        if cls.onos_restartable is True:
-            log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
-            return cord_test_onos_restart(config = {})
-
-    @classmethod
-    def onos_load_config(cls, config,controller=None):
-	log_test.info('loading onos config in controller %s'%controller)
-        status, code = OnosCtrl.config(config,controller=controller)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-        time.sleep(2)
-
-    @classmethod
-    def onos_interface_load(cls, interface_list,controller=None):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(cls.relay_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        cls.onos_load_config(interface_dict,controller=controller)
-
-    @classmethod
-    def onos_dhcp_relay_load(cls, server_ip, server_mac,controller=None):
-        relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
-        dhcp_dict = {'apps':{'org.onosproject.dhcp-relay':{'dhcprelay':
-                                                          {'dhcpserverConnectPoint':relay_device_map,
-                                                           'serverip':server_ip,
-                                                           'servermac':server_mac
-                                                           }
-                                                           }
-                             }
-                     }
-        cls.onos_load_config(dhcp_dict,controller=controller)
-
-    @classmethod
-    def get_host_ip(cls, port):
-        if cls.host_ip_map.has_key(port):
-            return cls.host_ip_map[port]
-        cls.host_ip_map[port] = '192.168.1.{}'.format(port)
-        return cls.host_ip_map[port]
-
-    @classmethod
-    def host_load(cls, iface):
-        '''Have ONOS discover the hosts for dhcp-relay responses'''
-        port = g_subscriber_port_map[iface]
-        host = '173.17.1.{}'.format(port)
-        cmds = ( 'ifconfig {} 0'.format(iface),
-                 'ifconfig {0} {1}'.format(iface, host),
-                 'arping -I {0} {1} -c 2'.format(iface, host),
-                 'ifconfig {} 0'.format(iface), )
-        for c in cmds:
-            os.system(c)
-    @classmethod
-
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    @classmethod
-    def dhcpd_start(cls, intf_list = None,
-                    config = default_config, options = default_options,
-                    subnet = default_subnet_config,controller=None):
-        '''Start the dhcpd server by generating the conf file'''
-        if intf_list is None:
-            intf_list = cls.relay_interfaces
-        ##stop dhcpd if already running
-        cls.dhcpd_stop()
-        dhcp_conf = cls.dhcpd_conf_generate(config = config, options = options,
-                                            subnet = subnet)
-        ##first touch dhcpd.leases if it doesn't exist
-        lease_file = '{}/dhcpd.leases'.format(cls.dhcp_data_dir)
-        if os.access(lease_file, os.F_OK) is False:
-            with open(lease_file, 'w') as fd: pass
-
-        conf_file = '{}/dhcpd.conf'.format(cls.dhcp_data_dir)
-        with open(conf_file, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        #now configure the dhcpd interfaces for various subnets
-        index = 0
-        intf_info = []
-        for ip,_ in subnet:
-            intf = intf_list[index]
-            mac = cls.get_mac(intf)
-            intf_info.append((ip, mac))
-            index += 1
-            os.system('ifconfig {} {}'.format(intf, ip))
-
-        intf_str = ','.join(intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format(conf_file, lease_file, intf_str)
-        log_test.info('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        ret = os.system(dhcpd_cmd)
-        assert_equal(ret, 0)
-        time.sleep(3)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        cls.relay_interfaces = intf_list
-        cls.onos_dhcp_relay_load(*intf_info[0],controller=controller)
-
-    @classmethod
-    def dhcpd_stop(cls):
-        os.system('pkill -9 dhcpd')
-        for intf in cls.relay_interfaces:
-            os.system('ifconfig {} 0'.format(intf))
-
-        cls.relay_interfaces = cls.relay_interfaces_last
-
-    @classmethod
-    def get_mac(cls, iface):
-        if cls.interface_to_mac_map.has_key(iface):
-            return cls.interface_to_mac_map[iface]
-        mac = get_mac(iface, pad = 0)
-        cls.interface_to_mac_map[iface] = mac
-        return mac
-
-    def stats(self,success_rate = False, only_discover = False, iface = 'veth0'):
-
-        self.ip_count = 0
-        self.failure_count = 0
-        self.start_time = 0
-        self.diff = 0
-        self.transaction_count = 0
-
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-        self.start_time = time.time()
-
-        while self.diff <= 60:
-
-            if only_discover:
-                cip, sip, mac, _ = self.dhcp.only_discover(multiple = True)
-                log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                        (cip, sip, mac))
-            else:
-                cip, sip = self.send_recv(mac, update_seed = True, validate = False)
-
-            if cip:
-                self.ip_count +=1
-            elif cip == None:
-                self.failure_count += 1
-                log_test.info('Failed to get ip')
-                if success_rate and self.ip_count > 0:
-                        break
-
-            self.diff = round(time.time() - self.start_time, 0)
-
-        self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
-        self.transactions += (self.ip_count+self.failure_count)
-        self.running_time += self.diff
-        self.total_success += self.ip_count
-        self.total_failure += self.failure_count
-
-    def send_recv(self, mac, update_seed = False, validate = True):
-        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
-        if validate:
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                (cip, sip, self.dhcp.get_mac(cip)[0]))
-        return cip,sip
-
-class Subscriber(Channels):
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    INTF_TX_DEFAULT = 'veth2'
-    INTF_RX_DEFAULT = 'veth0'
-    STATS_RX = 0
-    STATS_TX = 1
-    STATS_JOIN = 2
-    STATS_LEAVE = 3
-    SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
-    def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
-        num = 1, channel_start = 0,
-        tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
-        iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
-        mcast_cb = None, loginType = 'wireless'):
-        self.tx_port = tx_port
-        self.rx_port = rx_port
-        self.port_map = port_map or g_subscriber_port_map
-        try:
-            self.tx_intf = self.port_map[tx_port]
-            self.rx_intf = self.port_map[rx_port]
-        except:
-            self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-            self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-        log_test.info('Subscriber %s, rx interface %s, uplink interface %s' %(name, self.rx_intf, self.tx_intf))
-        Channels.__init__(self, num, channel_start = channel_start,
-                                   iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
-        self.name = name
-        self.service = service
-        self.service_map = {}
-        services = self.service.strip().split(' ')
-        for s in services:
-            self.service_map[s] = True
-        self.loginType = loginType
-        ##start streaming channels
-        self.join_map = {}
-        ##accumulated join recv stats
-        self.join_rx_stats = Stats()
-        self.recv_timeout = False
-
-    def has_service(self, service):
-        if self.service_map.has_key(service):
-            return self.service_map[service]
-        if self.service_map.has_key(service.upper()):
-            return self.service_map[service.upper()]
-        return False
-
-    def channel_join_update(self, chan, join_time):
-        self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
-        self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
-    def channel_join(self, chan = 0, delay = 2):
-        '''Join a channel and create a send/recv stats map'''
-        if self.join_map.has_key(chan):
-            del self.join_map[chan]
-        self.delay = delay
-        chan, join_time = self.join(chan)
-        self.channel_join_update(chan, join_time)
-        return chan
-
-    def channel_join_next(self, delay = 2):
-        '''Joins the next channel leaving the last channel'''
-        if self.last_chan:
-            if self.join_map.has_key(self.last_chan):
-                del self.join_map[self.last_chan]
-        self.delay = delay
-        chan, join_time = self.join_next()
-        self.channel_join_update(chan, join_time)
-        return chan
-
-    def channel_jump(self, delay = 2):
-        '''Jumps randomly to the next channel leaving the last channel'''
-        if self.last_chan is not None:
-            if self.join_map.has_key(self.last_chan):
-                del self.join_map[self.last_chan]
-        self.delay = delay
-        chan, join_time = self.jump()
-        self.channel_join_update(chan, join_time)
-        return chan
-
-    def channel_leave(self, chan = 0):
-        if self.join_map.has_key(chan):
-            del self.join_map[chan]
-        self.leave(chan)
-
-    def channel_update(self, chan, stats_type, packets, t=0):
-        if type(chan) == type(0):
-            chan_list = (chan,)
-        else:
-            chan_list = chan
-        for c in chan_list:
-            if self.join_map.has_key(c):
-                self.join_map[c][stats_type].update(packets = packets, t = t)
-    def channel_receive(self, chan, cb = None, count = 1, timeout = 5):
-        log_test.info('Subscriber %s on port %s receiving from group %s, channel %d' %
-                  (self.name, self.rx_intf, self.gaddr(chan), chan))
-        r = self.recv(chan, cb = cb, count = count, timeout = timeout)
-        if len(r) == 0:
-            log_test.info('Subscriber %s on port %s timed out' %(self.name, self.rx_intf))
-        else:
-            log_test.info('Subscriber %s on port %s received %d packets' %(self.name, self.rx_intf, len(r)))
-        if self.recv_timeout:
-            ##Negative test case is disabled for now
-            assert_equal(len(r), 0)
-
-    def recv_channel_cb(self, pkt):
-        ##First verify that we have received the packet for the joined instance
-        log_test.info('Packet received for group %s, subscriber %s, port %s' %
-                 (pkt[IP].dst, self.name, self.rx_intf))
-        if self.recv_timeout:
-            return
-        chan = self.caddr(pkt[IP].dst)
-        assert_equal(chan in self.join_map.keys(), True)
-        recv_time = monotonic.monotonic() * 1000000
-        join_time = self.join_map[chan][self.STATS_JOIN].start
-        delta = recv_time - join_time
-        self.join_rx_stats.update(packets=1, t = delta, usecs = True)
-        self.channel_update(chan, self.STATS_RX, 1, t = delta)
-        log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-
-class subscriber_pool:
-
-    def __init__(self, subscriber, test_cbs):
-        self.subscriber = subscriber
-        self.test_cbs = test_cbs
-
-    def pool_cb(self):
-        for cb in self.test_cbs:
-            if cb:
-                self.test_status = cb(self.subscriber)
-                if self.test_status is not True:
-                    ## This is chaning for other sub status has to check again
-                    self.test_status = True
-                    log_test.info('This service is failed and other services will not run for this subscriber')
-                    break
-        log_test.info('This Subscriber is tested for multiple service eligibility ')
-        self.test_status = True
-
-class cluster_subscriber(object):
-      apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
-      olt_apps = () #'org.opencord.cordmcast')
-      vtn_app = 'org.opencord.vtn'
-      table_app = 'org.ciena.cordigmp'
-      dhcp_server_config = {
-        "ip": "10.1.11.50",
-        "mac": "ca:fe:ca:fe:ca:fe",
-        "subnet": "255.255.252.0",
-        "broadcast": "10.1.11.255",
-        "router": "10.1.8.1",
-        "domain": "8.8.8.8",
-        "ttl": "63",
-        "delay": "2",
-        "startip": "10.1.11.51",
-        "endip": "10.1.11.100"
-      }
-
-      aaa_loaded = False
-      test_path = os.path.dirname(os.path.realpath(__file__))
-      table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
-      app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
-      onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-      olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-      cpqd_path = os.path.join(test_path, '..', 'setup')
-      ovs_path = cpqd_path
-      test_services = ('IGMP', 'TRAFFIC')
-      num_joins = 0
-      num_subscribers = 0
-      num_channels = 0
-      recv_timeout = False
-      onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      SUBSCRIBER_TIMEOUT = 300
-      CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-      CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-      @classmethod
-      def load_device_id(cls):
-            '''Configure the device id'''
-            did = OnosCtrl.get_device_id()
-            #Set the default config
-            cls.device_id = did
-            cls.device_dict = { "devices" : {
-                        "{}".format(did) : {
-                              "basic" : {
-                                    "driver" : "voltha"
-                                    }
-                              }
-                        },
-                  }
-            return did
-
-      @classmethod
-      def setUpClass(cls,controller=None):
-	  log_test.info('controller ip in cluster.py setupclass is %s'%controller)
-          '''Load the OLT config and activate relevant apps'''
-          did = cls.load_device_id()
-          network_cfg = { "devices" : {
-                  "{}".format(did) : {
-                        "basic" : {
-                              "driver" : "voltha"
-                              }
-                        }
-                  },
-          }
-          ## Restart ONOS with cpqd driver config for OVS
-	  print('onos restart in setUpClass')
-          cls.start_onos(network_cfg = network_cfg)
-	  #status, code = OnosCtrl.config(network_cfg)
-          #if status is False:
-          #   log_test.info('JSON config request for app %s returned status %d' %(app, code))
-          #assert_equal(status, True)
-          #time.sleep(2)
-          cls.install_app_table(controller=controller)
-          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-          OnosCtrl.cord_olt_config(cls.olt, controller=controller)
-          cls.port_map, cls.port_list = cls.olt.olt_port_map()
-          cls.activate_apps(cls.apps + cls.olt_apps,controller=controller)
-
-      @classmethod
-      def tearDownClass(cls,controller=None):
-          '''Deactivate the olt apps and restart OVS back'''
-          apps = cls.olt_apps + ( cls.table_app,)
-          for app in apps:
-              onos_ctrl = OnosCtrl(app,controller=controller)
-              onos_ctrl.deactivate()
-          cls.uninstall_app_table()
-          cls.start_onos(network_cfg = {})
-
-      @classmethod
-      def activate_apps(cls, apps,controller=None):
-            for app in apps:
-                  onos_ctrl = OnosCtrl(app,controller=controller)
-                  status, _ = onos_ctrl.activate()
-                  assert_equal(status, True)
-                  time.sleep(2)
-      @classmethod
-      def install_app_table(cls,controller=None):
-            ##Uninstall the existing app if any
-            OnosCtrl.uninstall_app(cls.table_app,onos_ip=controller)
-            time.sleep(2)
-            log_test.info('Installing the multi table app %s for subscriber test' %(cls.table_app_file))
-            OnosCtrl.install_app(cls.table_app_file,onos_ip=controller)
-            time.sleep(3)
-            #onos_ctrl = OnosCtrl(cls.vtn_app)
-            #onos_ctrl.deactivate()
-
-      @classmethod
-      def uninstall_app_table(cls,controller=None):
-            ##Uninstall the table app on class exit
-            OnosCtrl.uninstall_app(cls.table_app,onos_ip=controller)
-            time.sleep(2)
-            log_test.info('Installing back the cord igmp app %s for subscriber test on exit' %(cls.app_file))
-            OnosCtrl.install_app(cls.app_file,onos_ip=controller)
-            #onos_ctrl = OnosCtrl(cls.vtn_app)
-            #onos_ctrl.activate()
-
-      @classmethod
-      def start_onos(cls, network_cfg = None):
-            if cls.onos_restartable is False:
-                  log_test.info('ONOS restart is disabled. Skipping ONOS restart')
-                  return
-            if network_cfg is None:
-                  network_cfg = cls.device_dict
-
-            if type(network_cfg) is tuple:
-                  res = []
-                  for v in network_cfg:
-                        res += v.items()
-                  config = dict(res)
-            else:
-                  config = network_cfg
-            log_test.info('Restarting ONOS with new network configuration')
-            #return cord_test_onos_restart(config = config)
-
-      @classmethod
-      def remove_onos_config(cls):
-            try:
-                  os.unlink('{}/network-cfg.json'.format(cls.onos_config_path))
-            except: pass
-      @classmethod
-      def start_cpqd(cls, mac = '00:11:22:33:44:55'):
-            dpid = mac.replace(':', '')
-            cpqd_file = os.sep.join( (cls.cpqd_path, 'cpqd.sh') )
-            cpqd_cmd = '{} {}'.format(cpqd_file, dpid)
-            ret = os.system(cpqd_cmd)
-            assert_equal(ret, 0)
-            time.sleep(10)
-            device_id = 'of:{}{}'.format('0'*4, dpid)
-            return device_id
-
-      @classmethod
-      def start_ovs(cls):
-            ovs_file = os.sep.join( (cls.ovs_path, 'of-bridge.sh') )
-            ret = os.system(ovs_file)
-            assert_equal(ret, 0)
-            time.sleep(30)
-
-      @classmethod
-      def ovs_cleanup(cls):
-            ##For every test case, delete all the OVS groups
-            cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
-            cord_test_shell(cmd)
-            ##Since olt config is used for this test, we just fire a careless local cmd as well
-            try:
-                  os.system(cmd)
-            except: pass
-
-      def onos_aaa_load(self,controller=None):
-	    log_test.info('controller ip in cluster.py onos_aaa_load is %s'%controller)
-            if self.aaa_loaded:
-                  return
-            OnosCtrl.aaa_load_config(controller = controller)
-            self.aaa_loaded = True
-
-      def onos_dhcp_table_load(self, config = None,controller=None):
-	  log_test.info('controller ip in cluster.py onos_dhcp_table_load is %s'%controller)
-          dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
-          dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
-          if config:
-              for k in config.keys():
-                  if dhcp_config.has_key(k):
-                      dhcp_config[k] = config[k]
-          self.onos_load_config('org.onosproject.dhcp', dhcp_dict,controller=controller)
-
-      def onos_load_config(self, app, config,controller=None):
-	  log_test.info('controller ip in cluster.py onos_load_config is %s'%controller)
-          status, code = OnosCtrl(controller=controller).config(config)
-          if status is False:
-             log_test.info('JSON config request for app %s returned status %d' %(app, code))
-             assert_equal(status, True)
-          time.sleep(2)
-      def dhcp_sndrcv(self, dhcp, update_seed = False):
-            cip, sip = dhcp.discover(update_seed = update_seed)
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-            log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                     (cip, sip, dhcp.get_mac(cip)[0]))
-            return cip,sip
-
-      def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
-            config = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
-                      'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                      'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-            self.onos_dhcp_table_load(config)
-            dhcp = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
-            cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed)
-            return cip, sip
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            chan = self.subscriber.caddr(pkt[IP].dst)
-            assert_equal(chan in self.subscriber.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
-            log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-            self.test_status = True
-
-      def traffic_verify(self, subscriber):
-            if subscriber.has_service('TRAFFIC'):
-                  url = 'http://www.google.com'
-                  resp = requests.get(url)
-                  self.test_status = resp.ok
-                  if resp.ok == False:
-                        log_test.info('Subscriber %s failed get from url %s with status code %d'
-                                 %(subscriber.name, url, resp.status_code))
-                  else:
-                        log_test.info('GET request from %s succeeded for subscriber %s'
-                                 %(url, subscriber.name))
-                  return self.test_status
-
-      def tls_verify(self, subscriber):
-            if subscriber.has_service('TLS'):
-                  time.sleep(2)
-                  tls = TLSAuthTest(intf = subscriber.rx_intf)
-                  log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-                  tls.runTest()
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  self.test_status = True
-                  return self.test_status
-      def dhcp_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, update_seed = True)
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_jump_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-      def dhcp_next_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-      def igmp_verify(self, subscriber):
-            chan = 0
-            if subscriber.has_service('IGMP'):
-                  ##We wait for all the subscribers to join before triggering leaves
-                  if subscriber.rx_port > 1:
-                        time.sleep(5)
-                  subscriber.channel_join(chan, delay = 0)
-                  self.num_joins += 1
-                  while self.num_joins < self.num_subscribers:
-                        time.sleep(5)
-                  log_test.info('All subscribers have joined the channel')
-                  for i in range(10):
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        log_test.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_leave(chan)
-                        time.sleep(5)
-                        log_test.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
-                        #Should not receive packets for this subscriber
-                        self.recv_timeout = True
-                        subscriber.recv_timeout = True
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        subscriber.recv_timeout = False
-                        self.recv_timeout = False
-                        log_test.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_join(chan, delay = 0)
-                  self.test_status = True
-                  return self.test_status
-
-      def igmp_jump_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        log_test.info('Subscriber %s jumping channel' %subscriber.name)
-                        chan = subscriber.channel_jump(delay=0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-      def igmp_next_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        if i:
-                              chan = subscriber.channel_join_next(delay=0)
-                        else:
-                              chan = subscriber.channel_join(i, delay=0)
-                        log_test.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-                        log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log_test.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-
-      def generate_port_list(self, subscribers, channels):
-            return self.port_list[:subscribers]
-
-      def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = [],controller=None):
-            '''Load the subscriber from the database'''
-            self.subscriber_db = SubscriberDB(create = create, services = self.test_services)
-            if create is True:
-                  self.subscriber_db.generate(num)
-            self.subscriber_info = self.subscriber_db.read(num)
-            self.subscriber_list = []
-            if not port_list:
-                  port_list = self.generate_port_list(num, num_channels)
-
-            index = 0
-            for info in self.subscriber_info:
-                  self.subscriber_list.append(Subscriber(name=info['Name'],
-                                                         service=info['Service'],
-                                                         port_map = self.port_map,
-                                                         num=num_channels,
-                                                         channel_start = channel_start,
-                                                         tx_port = port_list[index][0],
-                                                         rx_port = port_list[index][1]))
-                  if num_channels > 1:
-                        channel_start += num_channels
-                  index += 1
-
-            #load the ssm list for all subscriber channels
-            igmpChannel = IgmpChannel(controller=controller)
-            ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
-            ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
-            igmpChannel.igmp_load_ssm_config(ssm_list)
-      def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
-                                  channel_start = 0, cbs = None, port_list = [], negative_subscriber_auth = None,controller=None):
-	  log_test.info('controller ip in cluster.py subscriber_join_verify is %s'%controller)
-          self.test_status = False
-          self.ovs_cleanup()
-          subscribers_count = num_subscribers
-          sub_loop_count =  num_subscribers
-          self.subscriber_load(create = True, num = num_subscribers,
-                               num_channels = num_channels, channel_start = channel_start, port_list = port_list,controller=controller)
-          self.onos_aaa_load(controller=controller)
-          self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
-
-          chan_leave = False #for single channel, multiple subscribers
-          if None in (cbs, negative_subscriber_auth):
-                cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                chan_leave = True
-          cbs_negative = cbs
-          for subscriber in self.subscriber_list:
-                subscriber.start()
-                if negative_subscriber_auth is 'half' and sub_loop_count%2 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                elif negative_subscriber_auth is 'onethird' and sub_loop_count%3 is not 0:
-                   cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-                else:
-                   cbs = cbs_negative
-                sub_loop_count = sub_loop_count - 1
-                pool_object = subscriber_pool(subscriber, cbs)
-                self.thread_pool.addTask(pool_object.pool_cb)
-          self.thread_pool.cleanUpThreads()
-          for subscriber in self.subscriber_list:
-                subscriber.stop()
-                if chan_leave is True:
-                      subscriber.channel_leave(0)
-          subscribers_count = 0
-          return self.test_status
-      def tls_invalid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_no_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = '')
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_self_signed_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def tls_non_ca_authrized_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_NON_CA_AUTHORIZED)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-      def tls_Nsubscribers_use_same_valid_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             num_users = 3
-             for i in xrange(num_users):
-                 tls = TLSAuthTest(intf = 'veth{}'.format(i*2))
-                 tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-      def dhcp_discover_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-             t1 = self.subscriber_dhcp_1release()
-             self.test_status = True
-             return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_1release(self, iface = INTF_RX_DEFAULT):
-             config = {'startip':'10.10.100.20', 'endip':'10.10.100.21',
-                       'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
-                       'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
-             self.onos_dhcp_table_load(config)
-             self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-             cip, sip = self.send_recv()
-             log_test.info('Releasing ip %s to server %s' %(cip, sip))
-             assert_equal(self.dhcp.release(cip), True)
-             log_test.info('Triggering DHCP discover again after release')
-             cip2, sip2 = self.send_recv(update_seed = True)
-             log_test.info('Verifying released IP was given back on rediscover')
-             assert_equal(cip, cip2)
-             log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-             assert_equal(self.dhcp.release(cip2), True)
-      def dhcp_client_reboot_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                  time.sleep(2)
-                  log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                  tl = self.subscriber_dhcp_client_request_after_reboot()
-                  self.test_status = True
-                  return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_request_after_reboot(self, iface = INTF_RX_DEFAULT):
-          #''' Client sends DHCP Request after reboot.'''
-
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                os.system('ifconfig '+iface+' down')
-                log_test.info('Client goes down.')
-                log_test.info('Delay for 5 seconds.')
-
-                time.sleep(5)
-
-                os.system('ifconfig '+iface+' up')
-                log_test.info('Client is up now.')
-
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                elif new_cip != None:
-                        log_test.info("Got DHCP ACK.")
-      def dhcp_client_renew_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_renew_time()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_renew_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log_test.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-                if new_cip and new_sip and lval:
-                        log_test.info("Client 's Renewal time is :%s",lval)
-                        log_test.info("Generating delay till renewal time.")
-                        time.sleep(lval)
-                        log_test.info("Client Sending Unicast DHCP request.")
-                        latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-                        if latest_cip and latest_sip:
-                                log_test.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                (latest_cip, mac, latest_sip) )
-
-                        elif latest_cip == None:
-                                log_test.info("Got DHCP NAK. Lease not renewed.")
-                elif new_cip == None or new_sip == None or lval == None:
-                        log_test.info("Got DHCP NAK.")
-      def dhcp_server_reboot_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_server_after_reboot()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-      def subscriber_dhcp_server_after_reboot(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP server goes down.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_not_equal(new_cip, None)
-                log_test.info('Getting DHCP server Down.')
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                onos_ctrl.deactivate()
-                for i in range(0,4):
-                        log_test.info("Sending DHCP Request.")
-                        log_test.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log_test.info('')
-                                log_test.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log_test.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-                log_test.info('Getting DHCP server Up.')
-#               self.activate_apps(self.dhcp_app)
-                onos_ctrl = OnosCtrl(self.dhcp_app)
-                status, _ = onos_ctrl.activate()
-                assert_equal(status, True)
-                time.sleep(3)
-                for i in range(0,4):
-                        log_test.info("Sending DHCP Request after DHCP server is up.")
-                        log_test.info('')
-                        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-                        if new_cip == None and new_sip == None:
-                                log_test.info('')
-                                log_test.info("DHCP Request timed out.")
-                        elif new_cip and new_sip:
-                                log_test.info("Got Reply from DHCP server.")
-                                assert_equal(new_cip,None) #Neagtive Test Case
-      def dhcp_client_rebind_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_rebind_time()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_rebind_time(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif cip and sip and mac:
-                log_test.info("Triggering DHCP Request.")
-                new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-                if new_cip and new_sip and lval:
-                        log_test.info("Client 's Rebind time is :%s",lval)
-                        log_test.info("Generating delay till rebind time.")
-                        time.sleep(lval)
-                        log_test.info("Client Sending broadcast DHCP requests for renewing lease or for getting new ip.")
-                        self.dhcp.after_T2 = True
-                        for i in range(0,4):
-                                latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-                                if latest_cip and latest_sip:
-                                        log_test.info("Got DHCP Ack. Lease Renewed for ip %s and mac %s from server %s." %
-                                                        (latest_cip, mac, latest_sip) )
-                                        break
-                                elif latest_cip == None:
-                                        log_test.info("Got DHCP NAK. Lease not renewed.")
-                        assert_not_equal(latest_cip, None)
-                elif new_cip == None or new_sip == None or lval == None:
-                        log_test.info("Got DHCP NAK.Lease not Renewed.")
-      def dhcp_starvation_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_starvation()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_starvation(self, iface = INTF_RX_DEFAULT):
-          '''DHCP starve'''
-          config = {'startip':'182.17.0.20', 'endip':'182.17.0.69',
-                    'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-          log_test.info('Verifying 1 ')
-          for x in xrange(50):
-              mac = RandMAC()._fix()
-              self.send_recv(mac = mac)
-          log_test.info('Verifying 2 ')
-          cip, sip = self.send_recv(update_seed = True, validate = False)
-          assert_equal(cip, None)
-          assert_equal(sip, None)
-
-      def dhcp_same_client_multi_discovers_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_same_client_multiple_discover()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-      def subscriber_dhcp_same_client_multiple_discover(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple discover . '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-                  (cip, sip, mac) )
-          log_test.info('Triggering DHCP discover again.')
-          new_cip, new_sip, new_mac , lval = self.dhcp.only_discover()
-          if cip == new_cip:
-                 log_test.info('Got same ip for 2nd DHCP discover for client IP %s from server %s for mac %s. Triggering DHCP Request. '
-                          % (new_cip, new_sip, new_mac) )
-          elif cip != new_cip:
-                log_test.info('Ip after 1st discover %s' %cip)
-                log_test.info('Map after 2nd discover %s' %new_cip)
-                assert_equal(cip, new_cip)
-
-      def dhcp_same_client_multi_request_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_same_client_multiple_request()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-      def subscriber_dhcp_same_client_multiple_request(self, iface = INTF_RX_DEFAULT):
-          ''' DHCP Client sending multiple repeat DHCP requests. '''
-          config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                    'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                    'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-          log_test.info('Sending DHCP discover and DHCP request.')
-          cip, sip = self.send_recv()
-          mac = self.dhcp.get_mac(cip)[0]
-          log_test.info("Sending DHCP request again.")
-          new_cip, new_sip = self.dhcp.only_request(cip, mac)
-          if (new_cip,new_sip) == (cip,sip):
-                log_test.info('Got same ip for 2nd DHCP Request for client IP %s from server %s for mac %s.'
-                          % (new_cip, new_sip, mac) )
-          elif (new_cip,new_sip):
-                log_test.info('No DHCP ACK')
-                assert_equal(new_cip, None)
-                assert_equal(new_sip, None)
-          else:
-                print "Something went wrong."
-
-      def dhcp_client_desired_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_desired_address()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_client_desired_address(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.31', iface = iface)
-          cip, sip, mac , lval = self.dhcp.only_discover(desired = True)
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-          elif cip != self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log_test.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_equal(cip, self.dhcp.seed_ip)
-      def dhcp_client_request_pkt_with_non_offered_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_server_nak_packet()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-
-      def subscriber_dhcp_server_nak_packet(self, iface = INTF_RX_DEFAULT):
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover()
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          else:
-                new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
-                if new_cip == None:
-                        log_test.info("Got DHCP server NAK.")
-                        assert_equal(new_cip, None)  #Negative Test Case
-
-      def dhcp_client_requested_out_pool_ip_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_client_desired_address_out_of_pool()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-      def subscriber_dhcp_client_desired_address_out_of_pool(self, iface = INTF_RX_DEFAULT):
-          '''DHCP Client asking for desired IP address from out of pool.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-          cip, sip, mac, lval = self.dhcp.only_discover(desired = True)
-          log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-          if cip == self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s as desired .' %
-                  (cip, sip, mac) )
-                assert_equal(cip, self.dhcp.seed_ip) #Negative Test Case
-
-          elif cip != self.dhcp.seed_ip:
-                log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-                log_test.info('The desired ip was: %s .' % self.dhcp.seed_ip)
-                assert_not_equal(cip, self.dhcp.seed_ip)
-
-          elif cip == None:
-                log_test.info('Got DHCP NAK')
-
-      def dhcp_client_specific_lease_scenario(self, subscriber):
-          if subscriber.has_service('DHCP'):
-                time.sleep(2)
-                log_test.info('Running subscriber %s DHCP rediscover scenario test' %subscriber.name)
-                tl = self.subscriber_dhcp_specific_lease_packet()
-                self.test_status = True
-                return self.test_status
-          else:
-              subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-              self.test_status = True
-              return self.test_status
-      def subscriber_dhcp_specific_lease_packet(self, iface = INTF_RX_DEFAULT):
-          ''' Client sends DHCP Discover packet for particular lease time.'''
-          config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                   'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                   'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-          self.onos_dhcp_table_load(config)
-          self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-          log_test.info('Sending DHCP discover with lease time of 700')
-          cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True)
-
-          log_test.info("Verifying Client 's IP and mac in DHCP Offer packet.")
-          if (cip == None and mac != None):
-                log_test.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
-                assert_not_equal(cip, None)
-          elif lval != 700:
-                log_test.info('Getting dhcp client IP %s from server %s for mac %s with lease time %s. That is not 700.' %
-                         (cip, sip, mac, lval) )
-                assert_not_equal(lval, 700)
diff --git a/src/test/utils/CordContainer.py b/src/test/utils/CordContainer.py
deleted file mode 100644
index 4f7287a..0000000
--- a/src/test/utils/CordContainer.py
+++ /dev/null
@@ -1,1465 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,time
-import io
-import json
-import yaml
-import errno
-import copy
-from pyroute2 import IPRoute
-from pyroute2.netlink import NetlinkError
-from itertools import chain
-from nsenter import Namespace
-try:
-    from docker import APIClient as Client
-except:
-    from docker import Client
-from docker import utils as dockerutils
-import shutil
-from OnosCtrl import OnosCtrl
-from OnosLog import OnosLog
-from OltConfig import OltConfig
-from EapolAAA import radius_add_users, radius_restore_users
-from onosclidriver import OnosCliDriver
-from threadPool import ThreadPool
-from threading import Lock
-
-class docker_netns(object):
-
-    dckr = Client()
-    def __init__(self, name):
-        pid = int(self.dckr.inspect_container(name)['State']['Pid'])
-        if pid == 0:
-            raise Exception('no container named {0}'.format(name))
-        self.pid = pid
-
-    def __enter__(self):
-        pid = self.pid
-        if not os.path.exists('/var/run/netns'):
-            os.mkdir('/var/run/netns')
-        os.symlink('/proc/{0}/ns/net'.format(pid), '/var/run/netns/{0}'.format(pid))
-        return str(pid)
-
-    def __exit__(self, type, value, traceback):
-        pid = self.pid
-        os.unlink('/var/run/netns/{0}'.format(pid))
-
-flatten = lambda l: chain.from_iterable(l)
-
-class Container(object):
-    dckr = Client()
-    IMAGE_PREFIX = '' ##for saving global prefix for all test classes
-    CONFIG_LOCK = Lock()
-
-    def __init__(self, name, image, prefix='', tag = 'candidate', command = 'bash', quagga_config = None):
-        self.name = name
-        self.prefix = prefix
-        if prefix:
-            self.prefix += '/'
-            image = '{}{}'.format(self.prefix, image)
-        self.image = image
-        self.tag = tag
-        if tag:
-            self.image_name = image + ':' + tag
-        else:
-            self.image_name = image
-        self.id = None
-        self.command = command
-        self.quagga_config = quagga_config
-
-    @classmethod
-    def build_image(cls, dockerfile, tag, force=True, nocache=False):
-        f = io.BytesIO(dockerfile.encode('utf-8'))
-        if force or not cls.image_exists(tag):
-            print('Build {0}...'.format(tag))
-            for line in cls.dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache):
-                if 'stream' in line:
-                    print(line['stream'].strip())
-
-    @classmethod
-    def image_exists(cls, name):
-        #return name in [ctn['RepoTags'][0] for ctn in cls.dckr.images()]
-        return name in list( flatten(ctn['RepoTags'] if ctn['RepoTags'] else '' for ctn in cls.dckr.images()) )
-
-    @classmethod
-    def create_host_config(cls, port_list = None, host_guest_map = None, privileged = False):
-        port_bindings = None
-        binds = None
-        if port_list:
-            port_bindings = {}
-            for p in port_list:
-                if type(p) is tuple:
-                    port_bindings[str(p[0])] = str(p[1])
-                else:
-                    port_bindings[str(p)] = str(p)
-
-        if host_guest_map:
-            binds = []
-            for h, g in host_guest_map:
-                binds.append('{0}:{1}'.format(h, g))
-
-        return cls.dckr.create_host_config(binds = binds, port_bindings = port_bindings, privileged = privileged)
-
-    @classmethod
-    def connect_to_network(cls, name, network):
-        try:
-            cls.dckr.connect_container_to_network(name, network)
-        except:
-            connect_cmd = 'docker network connect %s %s' %(network, name)
-            os.system(connect_cmd)
-        return True
-
-    @classmethod
-    def create_network(cls, network, subnet = None, gateway = None):
-        ipam_config = None
-        if subnet is not None and gateway is not None:
-            try:
-                ipam_pool = dockerutils.create_ipam_pool(subnet = subnet, gateway = gateway)
-                ipam_config = dockerutils.create_ipam_config(pool_configs = [ipam_pool])
-                cls.dckr.create_network(network, driver='bridge', ipam = ipam_config)
-            except:
-                create_cmd = 'docker network create %s --subnet %s --gateway %s >/dev/null 2>&1' %(network, subnet, gateway)
-                os.system(create_cmd)
-
-    @classmethod
-    def cleanup(cls, image):
-        cnt_list = filter(lambda c: c['Image'] == image, cls.dckr.containers(all=True))
-        for cnt in cnt_list:
-            print('Cleaning container %s' %cnt['Id'])
-            if cnt.has_key('State') and cnt['State'] == 'running':
-                cls.dckr.kill(cnt['Id'])
-            cls.dckr.remove_container(cnt['Id'], force=True)
-
-    @classmethod
-    def remove_container(cls, name, force=True):
-        try:
-            cls.dckr.remove_container(name, force = force)
-        except: pass
-
-    def exists(self):
-        return '/{0}'.format(self.name) in list(flatten(n['Names'] for n in self.dckr.containers()))
-
-    def img_exists(self):
-        #return self.image_name in [ctn['RepoTags'][0] if ctn['RepoTags'] else '' for ctn in self.dckr.images()]
-        return self.image_name in list( flatten(ctn['RepoTags'] if ctn['RepoTags'] else '' for ctn in self.dckr.images()) )
-
-    def ip(self, network = None):
-        cnt_list = filter(lambda c: c['Names'][0] == '/{}'.format(self.name), self.dckr.containers())
-        #if not cnt_list:
-        #    cnt_list = filter(lambda c: c['Image'] == self.image_name, self.dckr.containers())
-        cnt_settings = cnt_list.pop()
-        if network is not None and cnt_settings['NetworkSettings']['Networks'].has_key(network):
-            return cnt_settings['NetworkSettings']['Networks'][network]['IPAddress']
-        return cnt_settings['NetworkSettings']['Networks']['bridge']['IPAddress']
-
-    @classmethod
-    def ips(cls, image_name):
-        cnt_list = filter(lambda c: c['Image'] == image_name, cls.dckr.containers())
-        ips = [ cnt['NetworkSettings']['Networks']['bridge']['IPAddress'] for cnt in cnt_list ]
-        return ips
-
-    def kill(self, remove = True):
-        self.dckr.kill(self.name)
-        self.dckr.remove_container(self.name, force=True)
-
-    def start(self, rm = True, ports = None, volumes = None, host_config = None,
-              environment = None, tty = False, stdin_open = True,
-              network_disabled = False, network = None):
-
-        if rm and self.exists():
-            print('Removing container:', self.name)
-            self.dckr.remove_container(self.name, force=True)
-
-        ctn = self.dckr.create_container(image=self.image_name, ports = ports, command=self.command,
-                                         detach=True, name=self.name,
-                                         environment = environment,
-                                         volumes = volumes,
-                                         host_config = host_config, stdin_open=stdin_open, tty = tty,
-                                         network_disabled = network_disabled)
-        self.dckr.start(container=self.name)
-        if network_disabled is False:
-            if network is not None:
-                self.connect_to_network(self.name, network)
-            if self.quagga_config:
-                self.connect_to_br(index = 1)
-        self.id = ctn['Id']
-        return ctn
-
-    @classmethod
-    def pause_container(cls, image, delay):
-        cnt_list = filter(lambda c: c['Image'] == image, cls.dckr.containers(all=True))
-        for cnt in cnt_list:
-            print('Pause the container %s' %cnt['Id'])
-            if cnt.has_key('State') and cnt['State'] == 'running':
-                cls.dckr.pause(cnt['Id'])
-        if delay != 0:
-           time.sleep(delay)
-           for cnt in cnt_list:
-               print('Unpause the container %s' %cnt['Id'])
-               cls.dckr.unpause(cnt['Id'])
-        else:
-            print('Infinity time pause the container %s' %cnt['Id'])
-        return 'success'
-
-    def connect_to_br(self, index = 0):
-        self.CONFIG_LOCK.acquire()
-        try:
-            with docker_netns(self.name) as pid:
-                for quagga_config in self.quagga_config:
-                    ip = IPRoute()
-                    br = ip.link_lookup(ifname=quagga_config['bridge'])
-                    if len(br) == 0:
-                        try:
-                            ip.link_create(ifname=quagga_config['bridge'], kind='bridge')
-                        except NetlinkError as e:
-                            err, _ = e.args
-                            if err == errno.EEXIST:
-                                pass
-                            else:
-                                raise NetlinkError(*e.args)
-                        br = ip.link_lookup(ifname=quagga_config['bridge'])
-                    br = br[0]
-                    ip.link('set', index=br, state='up')
-                    ifname = '{0}-{1}'.format(self.name[:12], index)
-                    ifs = ip.link_lookup(ifname=ifname)
-                    if len(ifs) > 0:
-                       ip.link_remove(ifs[0])
-                    peer_ifname = '{0}-{1}'.format(pid, index)
-                    ip.link_create(ifname=ifname, kind='veth', peer=peer_ifname)
-                    host = ip.link_lookup(ifname=ifname)[0]
-                    ip.link('set', index=host, master=br)
-                    ip.link('set', index=host, state='up')
-                    guest = ip.link_lookup(ifname=peer_ifname)[0]
-                    ip.link('set', index=guest, net_ns_fd=pid)
-                    with Namespace(pid, 'net'):
-                        ip = IPRoute()
-                        ip.link('set', index=guest, ifname='eth{}'.format(index+1))
-                        ip.addr('add', index=guest, address=quagga_config['ip'], mask=quagga_config['mask'])
-                        ip.link('set', index=guest, state='up')
-                    index += 1
-        finally:
-            self.CONFIG_LOCK.release()
-
-    def execute(self, cmd, tty = True, stream = False, shell = False, detach = True):
-        res = 0
-        if type(cmd) == str:
-            cmds = (cmd,)
-        else:
-            cmds = cmd
-        if shell:
-            for c in cmds:
-                res += os.system('docker exec {0} {1}'.format(self.name, c))
-            return res
-        for c in cmds:
-            i = self.dckr.exec_create(container=self.name, cmd=c, tty = tty, privileged = True)
-            s = self.dckr.exec_start(i['Id'], stream = stream, detach=detach, socket=True)
-            try:
-                s.close()
-            except: pass
-            result = self.dckr.exec_inspect(i['Id'])
-            res += 0 if result['ExitCode'] == None else result['ExitCode']
-        return res
-
-    def restart(self, timeout =10):
-        return self.dckr.restart(self.name, timeout)
-
-def get_mem(jvm_heap_size = None, instances = 1):
-    if instances <= 0:
-        instances = 1
-    heap_size = jvm_heap_size
-    heap_size_i = 0
-    #sanitize the heap size config
-    if heap_size is not None:
-        if not heap_size.isdigit():
-            try:
-                heap_size_i = int(heap_size[:-1])
-                suffix = heap_size[-1]
-                if suffix == 'M':
-                    heap_size_i /= 1024 #convert to gigs
-                    #allow to specific minimum heap size
-                    if heap_size_i == 0:
-                        return heap_size
-            except:
-                ##invalid suffix length probably. Fall back to default
-                heap_size = None
-        else:
-            heap_size_i = int(heap_size)
-
-    with open('/proc/meminfo', 'r') as fd:
-        meminfo = fd.readlines()
-        mem = 0
-        for m in meminfo:
-            if m.startswith('MemTotal:') or m.startswith('SwapTotal:'):
-                mem += int(m.split(':')[1].strip().split()[0])
-
-        mem = max(mem/1024/1024/2/instances, 1)
-        mem = min(mem, 16)
-
-    if heap_size_i:
-        #we take the minimum of the provided heap size and max allowed heap size
-        heap_size_i = min(heap_size_i, mem)
-    else:
-        heap_size_i = mem
-
-    return '{}G'.format(heap_size_i)
-
-class OnosCord(Container):
-    """Use this when running the cord tester agent on the onos compute node"""
-    onos_config_dir_guest = '/root/onos/config'
-    synchronizer_map = { 'vtn' : { 'install':
-                      ('http://mavenrepo:8080/repository/org/opencord/cord-config/1.3.0-SNAPSHOT/cord-config-1.3.0-SNAPSHOT.oar',
-                       'http://mavenrepo:8080/repository/org/opencord/cord-config/1.4.0-SNAPSHOT/cord-config-1.4.0-SNAPSHOT.oar',
-                       'http://mavenrepo:8080/repository/org/opencord/vtn/1.3.0/vtn-1.3.0.oar',
-                       'http://mavenrepo:8080/repository/org/opencord/vtn/1.4.0-SNAPSHOT/vtn-1.4.0-SNAPSHOT.oar',),
-                                   'activate':
-                                   ('org.onosproject.ovsdb-base', 'org.onosproject.drivers.ovsdb',
-                                    'org.onosproject.dhcp', 'org.onosproject.optical-model',
-                                    'org.onosproject.openflow-base', 'org.onosproject.proxyarp',
-                                    'org.onosproject.hostprovider'),
-                                   },
-                         'fabric' : { 'activate':
-                                      ('org.onosproject.hostprovider', 'org.onosproject.optical-model',
-                                       'org.onosproject.openflow-base', 'org.onosproject.vrouter',
-                                       'org.onosproject.netcfghostprovider', 'org.onosproject.netcfglinksprovider',
-                                       'org.onosproject.segmentrouting', 'org.onosproject.proxyarp'),
-                                      }
-                         }
-    tester_apps = ('http://mavenrepo:8080/repository/org/opencord/aaa/1.4.0-SNAPSHOT/aaa-1.4.0-SNAPSHOT.oar',
-                   'http://mavenrepo:8080/repository/org/opencord/igmp/1.4.0-SNAPSHOT/igmp-1.4.0-SNAPSHOT.oar',)
-
-    old_service_profile = '/opt/cord/orchestration/service-profile/cord-pod'
-    cord_profile = '/opt/cord_profile'
-
-    def __init__(self, onos_ip, conf, service_profile, synchronizer, start = True, boot_delay = 5, skip = False):
-        if not skip:
-            if not os.access(conf, os.F_OK):
-                raise Exception('ONOS cord configuration location %s is invalid' %conf)
-            self.old_cord = False
-            if os.access(self.old_service_profile, os.F_OK):
-                self.old_cord = True
-            self.onos_ip = onos_ip
-            self.onos_cord_dir = conf
-            self.boot_delay = boot_delay
-            self.synchronizer = synchronizer
-            self.service_profile = service_profile
-            self.docker_yaml = os.path.join(conf, 'docker-compose.yml')
-            self.docker_yaml_saved = os.path.join(conf, 'docker-compose.yml.saved')
-            self.onos_config_dir = os.path.join(conf, 'config')
-            self.onos_cfg_save_loc = os.path.join(conf, 'network-cfg.json.saved')
-            instance_active = False
-            #if we have a wrapper onos instance already active, back out
-            if os.access(self.onos_config_dir, os.F_OK) or os.access(self.docker_yaml_saved, os.F_OK):
-                instance_active = True
-            else:
-                if start is True:
-                    os.mkdir(self.onos_config_dir)
-                    shutil.copy(self.docker_yaml, self.docker_yaml_saved)
-
-            self.start_wrapper = instance_active is False and start is True
-            ##update the docker yaml with the config volume
-            with open(self.docker_yaml, 'r') as f:
-                yaml_config = yaml.load(f)
-                image = yaml_config['services'].keys()[0]
-                cord_conf_dir_basename = os.path.basename(self.onos_cord_dir.replace('-', '').replace('_', ''))
-                xos_onos_name = '{}_{}_1'.format(cord_conf_dir_basename, image)
-                if not yaml_config['services'][image].has_key('volumes'):
-                    yaml_config['services'][image]['volumes'] = []
-                volumes = yaml_config['services'][image]['volumes']
-                config_volumes = filter(lambda e: e.find(self.onos_config_dir_guest) >= 0, volumes)
-                if not config_volumes:
-                    config_volume = '{}:{}'.format(self.onos_config_dir, self.onos_config_dir_guest)
-                    volumes.append(config_volume)
-                    if self.start_wrapper:
-                        docker_yaml_changed = '{}-changed'.format(self.docker_yaml)
-                        with open(docker_yaml_changed, 'w') as wf:
-                            yaml.dump(yaml_config, wf)
-                        os.rename(docker_yaml_changed, self.docker_yaml)
-                self.volumes = volumes
-
-            ##Create an container instance of xos onos
-            super(OnosCord, self).__init__(xos_onos_name, image, tag = '', quagga_config = Onos.QUAGGA_CONFIG)
-            self.last_cfg = None
-            if self.start_wrapper:
-                #fetch the current config of onos cord instance and save it
-                try:
-                    self.last_cfg = OnosCtrl.get_config(controller = onos_ip)
-                    json_data = json.dumps(self.last_cfg, indent=4)
-                    with open(self.onos_cfg_save_loc, 'w') as f:
-                        f.write(json_data)
-                except:
-                    pass
-                #start the container back with the shared onos config volume
-                self.start()
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 30:
-            cli = OnosCliDriver(controller = self.onos_ip, connect = True)
-            if cli.handle:
-                return cli
-            else:
-                retries += 1
-                time.sleep(3)
-
-        return None
-
-    def cliExit(self, cli):
-        if cli:
-            cli.disconnect()
-
-    def synchronize_fabric(self, cfg = None):
-        if self.old_cord is True:
-            cmds = [ 'cd {} && make {}'.format(self.old_service_profile, self.synchronizer),
-                     'sleep 30'
-                     ]
-            for cmd in cmds:
-                try:
-                    os.system(cmd)
-                except:
-                    pass
-
-    def synchronize_vtn(self, cfg = None):
-        if self.old_cord is True:
-            cmds = [ 'cd {} && make {}'.format(self.old_service_profile, self.synchronizer),
-                     'sleep 30'
-                     ]
-            for cmd in cmds:
-                try:
-                    os.system(cmd)
-                except:
-                    pass
-            return
-        if cfg is None:
-            return
-        if not cfg.has_key('apps'):
-            return
-        if not cfg['apps'].has_key('org.opencord.vtn'):
-            return
-        vtn_neutron_cfg = cfg['apps']['org.opencord.vtn']['cordvtn']['openstack']
-        password = vtn_neutron_cfg['password']
-        endpoint = vtn_neutron_cfg['endpoint']
-        user = vtn_neutron_cfg['user']
-        tenant = vtn_neutron_cfg['tenant']
-        vtn_host = cfg['apps']['org.opencord.vtn']['cordvtn']['nodes'][0]['hostname']
-        cli = self.cliEnter()
-        if cli is None:
-            return
-        cli.cordVtnSyncNeutronStates(endpoint, password, tenant = tenant, user = user)
-        time.sleep(2)
-        cli.cordVtnNodeInit(vtn_host)
-        self.cliExit(cli)
-
-    def synchronize(self, cfg_unlink = False):
-
-        if not self.synchronizer_map.has_key(self.synchronizer):
-            return
-
-        install_list = ()
-        if self.synchronizer_map[self.synchronizer].has_key('install'):
-            install_list = self.synchronizer_map[self.synchronizer]['install']
-
-        activate_list = ()
-        if self.synchronizer_map[self.synchronizer].has_key('activate'):
-            activate_list = self.synchronizer_map[self.synchronizer]['activate']
-
-        for app_url in install_list:
-            print('Installing app from url: %s' %app_url)
-            OnosCtrl.install_app_from_url(None, None, app_url = app_url, onos_ip = self.onos_ip)
-
-        for app in activate_list:
-            print('Activating app %s' %app)
-            OnosCtrl(app, controller = self.onos_ip).activate()
-            time.sleep(2)
-
-        for app_url in self.tester_apps:
-            print('Installing tester app from url: %s' %app_url)
-            OnosCtrl.install_app_from_url(None, None, app_url = app_url, onos_ip = self.onos_ip)
-
-        cfg = None
-        #restore the saved config after applications are activated
-        if os.access(self.onos_cfg_save_loc, os.F_OK):
-            with open(self.onos_cfg_save_loc, 'r') as f:
-                cfg = json.load(f)
-                try:
-                    OnosCtrl.config(cfg, controller = self.onos_ip)
-                    if cfg_unlink is True:
-                        os.unlink(self.onos_cfg_save_loc)
-                except:
-                    pass
-
-        if hasattr(self, 'synchronize_{}'.format(self.synchronizer)):
-            getattr(self, 'synchronize_{}'.format(self.synchronizer))(cfg = cfg)
-
-        #now restart the xos synchronizer container
-        cmd = None
-        if os.access('{}/onboarding-docker-compose/docker-compose.yml'.format(self.cord_profile), os.F_OK):
-            cmd = 'cd {}/onboarding-docker-compose && \
-            docker-compose -p {} restart xos_synchronizer_{}'.format(self.cord_profile,
-                                                                     self.service_profile,
-                                                                     self.synchronizer)
-        else:
-            if os.access('{}/docker-compose.yml'.format(self.cord_profile), os.F_OK):
-                cmd = 'cd {} && \
-                docker-compose -p {} restart {}-synchronizer'.format(self.cord_profile,
-                                                                     self.service_profile,
-                                                                     self.synchronizer)
-        if cmd is not None:
-            try:
-                print(cmd)
-                os.system(cmd)
-            except:
-                pass
-
-    def start(self, restart = False, network_cfg = None):
-        if network_cfg is not None:
-            json_data = json.dumps(network_cfg, indent=4)
-            with open('{}/network-cfg.json'.format(self.onos_config_dir), 'w') as f:
-                f.write(json_data)
-
-        #we avoid using docker-compose restart for now.
-        #since we don't want to retain the metadata across restarts
-        #stop and start and synchronize the services before installing tester cord apps
-        cmds = [ 'cd {} && docker-compose down'.format(self.onos_cord_dir),
-                 'cd {} && docker-compose up -d'.format(self.onos_cord_dir),
-                 'sleep 150',
-        ]
-        for cmd in cmds:
-            try:
-                print(cmd)
-                os.system(cmd)
-            except:pass
-
-        self.synchronize()
-        ##we could also connect container to default docker network but disabled for now
-        #Container.connect_to_network(self.name, 'bridge')
-        #connect container to the quagga bridge
-        self.connect_to_br(index = 0)
-        print('Waiting %d seconds for ONOS instance to start' %self.boot_delay)
-        time.sleep(self.boot_delay)
-
-    def build_image(self):
-        build_cmd = 'cd {} && docker-compose build'.format(self.onos_cord_dir)
-        os.system(build_cmd)
-
-    def restore(self, force = False):
-        restore = self.start_wrapper is True or force is True
-        if not restore:
-            return
-        #nothing to restore
-        if not os.access(self.docker_yaml_saved, os.F_OK):
-            return
-
-        #restore the config files back. The synchronizer restore should bring the last config back
-        cmds = ['cd {} && docker-compose down'.format(self.onos_cord_dir),
-                'rm -rf {}'.format(self.onos_config_dir),
-                'mv {} {}'.format(self.docker_yaml_saved, self.docker_yaml),
-                'cd {} && docker-compose up -d'.format(self.onos_cord_dir),
-                'sleep 150',
-        ]
-        for cmd in cmds:
-            try:
-                print(cmd)
-                os.system(cmd)
-            except: pass
-
-        self.synchronize(cfg_unlink = True)
-
-class OnosCordStopWrapper(Container):
-    onos_cord_dir = os.path.join(os.getenv('HOME'), 'cord-tester-cord')
-    docker_yaml = os.path.join(onos_cord_dir, 'docker-compose.yml')
-
-    def __init__(self):
-        if os.access(self.docker_yaml, os.F_OK):
-            with open(self.docker_yaml, 'r') as f:
-                yaml_config = yaml.load(f)
-                image = yaml_config['services'].keys()[0]
-                name = 'cordtestercord_{}_1'.format(image)
-            super(OnosCordStopWrapper, self).__init__(name, image, tag = '')
-            if self.exists():
-                print('Killing container %s' %self.name)
-                self.kill()
-
-class Onos(Container):
-    QUAGGA_CONFIG = [ { 'bridge' : 'quagga-br', 'ip': '10.10.0.4', 'mask' : 16 }, ]
-    MAX_INSTANCES = 3
-    JVM_HEAP_SIZE = None
-    SYSTEM_MEMORY = (get_mem(),) * 2
-    INSTANCE_MEMORY = (get_mem(instances=MAX_INSTANCES),) * 2
-    JAVA_OPTS_FORMAT = '-Xms{} -Xmx{} -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode'
-    JAVA_OPTS_DEFAULT = JAVA_OPTS_FORMAT.format(*SYSTEM_MEMORY) #-XX:+PrintGCDetails -XX:+PrintGCTimeStamps'
-    JAVA_OPTS_CLUSTER_DEFAULT = JAVA_OPTS_FORMAT.format(*INSTANCE_MEMORY)
-    env = { 'ONOS_APPS' : 'drivers,openflow,proxyarp,vrouter,hostprovider', 'JAVA_OPTS' : JAVA_OPTS_DEFAULT }
-    onos_cord_apps = ( ['cord-config', '1.2-SNAPSHOT', 'org.opencord.config'],
-                       ['sadis-app', '3.0-SNAPSHOT', 'org.opencord.sadis'],
-                       ['olt-app', '1.2-SNAPSHOT', 'org.onosproject.olt'],
-                       ['aaa', '1.2-SNAPSHOT', 'org.opencord.aaa'],
-                       ['igmp', '1.2-SNAPSHOT', 'org.opencord.igmp'],
-                       )
-    cord_apps_version_updated = False
-    expose_port = False
-    expose_ports = [ 8181, 8101, 9876, 6653, 6633, 2000, 2620, 5005 ]
-    ports = []
-    setup_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup')
-    host_config_dir = os.path.join(setup_dir, 'onos-config')
-    guest_config_dir = '/root/onos/config'
-    guest_data_dir = '/root/onos/apache-karaf-3.0.8/data'
-    guest_log_file = '/root/onos/apache-karaf-3.0.8/data/log/karaf.log'
-    onos_gen_partitions = os.path.join(setup_dir, 'onos-gen-partitions')
-    onos_form_cluster = os.path.join(setup_dir, 'onos-form-cluster')
-    cord_apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'apps')
-    host_guest_map = ( (host_config_dir, guest_config_dir), )
-    ssl_key = None
-    cluster_cfg = os.path.join(host_config_dir, 'cluster.json')
-    cluster_mode = False
-    cluster_instances = []
-    NAME = 'cord-onos'
-    ##the ip of ONOS in default cluster.json in setup/onos-config
-    CLUSTER_CFG_IP = '172.17.0.2'
-    IMAGE = 'onosproject/onos'
-    TAG = 'latest'
-    PREFIX = ''
-
-    @classmethod
-    def generate_cluster_cfg(cls, ip):
-        if type(ip) in [ list, tuple ]:
-            ips = ' '.join(ip)
-        else:
-            ips = ip
-        try:
-            cmd = '{} {} {}'.format(cls.onos_gen_partitions, cls.cluster_cfg, ips)
-            os.system(cmd)
-        except: pass
-
-    @classmethod
-    def form_cluster(cls, ips):
-        nodes = ' '.join(ips)
-        try:
-            cmd = '{} {}'.format(cls.onos_form_cluster, nodes)
-            os.system(cmd)
-        except: pass
-
-    @classmethod
-    def cleanup_runtime(cls):
-        '''Cleanup ONOS runtime generated files'''
-        files = ( Onos.cluster_cfg, os.path.join(Onos.host_config_dir, 'network-cfg.json') )
-        for f in files:
-            if os.access(f, os.F_OK):
-                try:
-                    os.unlink(f)
-                except: pass
-
-    @classmethod
-    def get_data_map(cls, host_volume, guest_volume_dir):
-        host_volume_dir = os.path.join(cls.setup_dir, os.path.basename(host_volume))
-        if not os.path.exists(host_volume_dir):
-            os.mkdir(host_volume_dir)
-        return ( (host_volume_dir, guest_volume_dir), )
-
-    @classmethod
-    def remove_data_map(cls, host_volume, guest_volume_dir):
-        host_volume_dir = os.path.join(cls.setup_dir, os.path.basename(host_volume))
-        if os.path.exists(host_volume_dir):
-            shutil.rmtree(host_volume_dir)
-
-    @classmethod
-    def update_data_dir(cls, karaf):
-        Onos.guest_data_dir = '/root/onos/apache-karaf-{}/data'.format(karaf)
-        Onos.guest_log_file = '/root/onos/apache-karaf-{}/data/log/karaf.log'.format(karaf)
-
-    @classmethod
-    def update_ssl_key(cls, key):
-        if os.access(key, os.F_OK):
-            try:
-                shutil.copy(key, cls.host_config_dir)
-                cls.ssl_key = os.path.join(cls.host_config_dir, os.path.basename(key))
-            except:pass
-
-    @classmethod
-    def set_expose_port(cls, flag):
-        cls.expose_port = flag
-
-    def get_port_map(self, instance=0):
-        if self.expose_port is False:
-            return self.ports
-        return map(lambda p: (p, p + instance), self.expose_ports)
-
-    def remove_data_volume(self):
-        if self.data_map is not None:
-            self.remove_data_map(*self.data_map)
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX, tag = TAG,
-                 boot_delay = 20, restart = False, network_cfg = None,
-                 cluster = False, data_volume = None, async = False, quagga_config = None,
-                 network = None, instance = 0):
-        if restart is True:
-            ##Find the right image to restart
-            running_image = filter(lambda c: c['Names'][0] == '/{}'.format(name), self.dckr.containers())
-            if running_image:
-                image_name = running_image[0]['Image']
-                try:
-                    image = image_name.split(':')[0]
-                    tag = image_name.split(':')[1]
-                except: pass
-
-        if quagga_config is None:
-            quagga_config = Onos.QUAGGA_CONFIG
-        super(Onos, self).__init__(name, image, prefix = prefix, tag = tag, quagga_config = quagga_config)
-        self.boot_delay = boot_delay
-        self.data_map = None
-        instance_memory = (get_mem(jvm_heap_size = Onos.JVM_HEAP_SIZE, instances = Onos.MAX_INSTANCES),) * 2
-        self.env['JAVA_OPTS'] = self.JAVA_OPTS_FORMAT.format(*instance_memory)
-        self.ports = self.get_port_map(instance = instance)
-        if self.ssl_key:
-            key_files = ( os.path.join(self.guest_config_dir, os.path.basename(self.ssl_key)), ) * 2
-            self.env['JAVA_OPTS'] += ' -DenableOFTLS=true -Djavax.net.ssl.keyStore={} -Djavax.net.ssl.keyStorePassword=222222 -Djavax.net.ssl.trustStore={} -Djavax.net.ssl.trustStorePassword=222222'.format(*key_files)
-        if cluster is True:
-            if data_volume is not None:
-                self.data_map = self.get_data_map(data_volume, self.guest_data_dir)
-                self.host_guest_map = self.host_guest_map + self.data_map
-            if os.access(self.cluster_cfg, os.F_OK):
-                try:
-                    os.unlink(self.cluster_cfg)
-                except: pass
-
-        self.host_config = self.create_host_config(port_list = self.ports,
-                                                   host_guest_map = self.host_guest_map)
-        self.volumes = []
-        for _,g in self.host_guest_map:
-            self.volumes.append(g)
-
-        if restart is True and self.exists():
-            self.kill()
-
-        if not self.exists():
-            self.remove_container(name, force=True)
-            host_config = self.create_host_config(port_list = self.ports,
-                                                  host_guest_map = self.host_guest_map)
-            volumes = []
-            for _,g in self.host_guest_map:
-                volumes.append(g)
-            if network_cfg is not None:
-                json_data = json.dumps(network_cfg, indent=4)
-                with open('{}/network-cfg.json'.format(self.host_config_dir), 'w') as f:
-                    f.write(json_data)
-            if cluster is False or async is False:
-                print('Starting ONOS container %s' %self.name)
-                self.start(ports = self.ports, environment = self.env,
-                           host_config = self.host_config, volumes = self.volumes, tty = True,
-                           network = Radius.NETWORK)
-                if not restart:
-                    ##wait a bit before fetching IP to regenerate cluster cfg
-                    time.sleep(5)
-                    ip = self.ip()
-                    ##Just a quick hack/check to ensure we don't regenerate in the common case.
-                    ##As ONOS is usually the first test container that is started
-                    if cluster is False:
-                        if ip != self.CLUSTER_CFG_IP or not os.access(self.cluster_cfg, os.F_OK):
-                            print('Regenerating ONOS cluster cfg for ip %s' %ip)
-                            self.generate_cluster_cfg(ip)
-                            self.kill()
-                            self.remove_container(self.name, force=True)
-                            print('Restarting ONOS container %s' %self.name)
-                            self.start(ports = self.ports, environment = self.env,
-                                       host_config = self.host_config, volumes = self.volumes, tty = True,
-                                       network = Radius.NETWORK)
-                print('Waiting for ONOS to boot')
-                time.sleep(boot_delay)
-                self.wait_for_onos_start(self.ip())
-                self.running = True
-            else:
-                self.running = False
-        else:
-            self.running = True
-        if self.running:
-            self.ipaddr = self.ip()
-            if cluster is False:
-                self.install_cord_apps(self.ipaddr)
-
-    @classmethod
-    def get_quagga_config(cls, instance = 0):
-        quagga_config = copy.deepcopy(cls.QUAGGA_CONFIG)
-        if instance == 0:
-            return quagga_config
-        ip = quagga_config[0]['ip']
-        octets = ip.split('.')
-        octets[3] = str((int(octets[3]) + instance) & 255)
-        ip = '.'.join(octets)
-        quagga_config[0]['ip'] = ip
-        return quagga_config
-
-    @classmethod
-    def start_cluster_async(cls, onos_instances):
-        instances = filter(lambda o: o.running == False, onos_instances)
-        if not instances:
-            return
-        tpool = ThreadPool(len(instances), queue_size = 1, wait_timeout = 1)
-        for onos in instances:
-            tpool.addTask(onos.start_async)
-        tpool.cleanUpThreads()
-
-    def start_async(self):
-        print('Starting ONOS container %s' %self.name)
-        self.start(ports = self.ports, environment = self.env,
-                   host_config = self.host_config, volumes = self.volumes, tty = True)
-        time.sleep(3)
-        self.ipaddr = self.ip()
-        print('Waiting for ONOS container %s to start' %self.name)
-        self.wait_for_onos_start(self.ipaddr)
-        self.running = True
-        print('ONOS container %s started' %self.name)
-
-    @classmethod
-    def wait_for_onos_start(cls, ip, tries = 30):
-        onos_log = OnosLog(host = ip, log_file = Onos.guest_log_file)
-        num_tries = 0
-        started = None
-        while not started and num_tries < tries:
-            time.sleep(3)
-            started = onos_log.search_log_pattern('ApplicationManager .* Started')
-            num_tries += 1
-
-        if not started:
-            print('ONOS did not start')
-        else:
-            print('ONOS started')
-        return started
-
-    @classmethod
-    def setup_cluster_deprecated(cls, onos_instances, image_name = None):
-        if not onos_instances or len(onos_instances) < 2:
-            return
-        ips = []
-        if image_name is not None:
-            ips = Container.ips(image_name)
-        else:
-            for onos in onos_instances:
-                ips.append(onos.ipaddr)
-        Onos.cluster_instances = onos_instances
-        Onos.cluster_mode = True
-        ##regenerate the cluster json with the 3 instance ips before restarting them back
-        print('Generating cluster cfg for ONOS instances with ips %s' %ips)
-        Onos.generate_cluster_cfg(ips)
-        for onos in onos_instances:
-            onos.kill()
-            onos.remove_container(onos.name, force=True)
-            print('Restarting ONOS container %s for forming cluster' %onos.name)
-            onos.start(ports = onos.ports, environment = onos.env,
-                       host_config = onos.host_config, volumes = onos.volumes, tty = True)
-            print('Waiting %d seconds for ONOS %s to boot' %(onos.boot_delay, onos.name))
-            time.sleep(onos.boot_delay)
-            onos.ipaddr = onos.ip()
-            onos.install_cord_apps(onos.ipaddr)
-
-    @classmethod
-    def setup_cluster(cls, onos_instances, image_name = None):
-        if not onos_instances or len(onos_instances) < 2:
-            return
-        ips = []
-        if image_name is not None:
-            ips = Container.ips(image_name)
-        else:
-            for onos in onos_instances:
-                ips.append(onos.ipaddr)
-        Onos.cluster_instances = onos_instances
-        Onos.cluster_mode = True
-        ##regenerate the cluster json with the 3 instance ips before restarting them back
-        print('Forming cluster for ONOS instances with ips %s' %ips)
-        Onos.form_cluster(ips)
-        ##wait for the cluster to be formed
-        print('Waiting for the cluster to be formed')
-        time.sleep(60)
-        for onos in onos_instances:
-            onos.install_cord_apps(onos.ipaddr)
-
-    @classmethod
-    def add_cluster(cls, count = 1, network_cfg = None):
-        if not cls.cluster_instances or Onos.cluster_mode is False:
-            return
-        for i in range(count):
-            instance = len(cls.cluster_instances)
-            name = '{}-{}'.format(Onos.NAME, instance+1)
-            onos = cls(name = name, image = Onos.IMAGE, tag = Onos.TAG, prefix = Container.IMAGE_PREFIX,
-                       cluster = True, network_cfg = network_cfg, instance = instance)
-            cls.cluster_instances.append(onos)
-
-        cls.setup_cluster(cls.cluster_instances)
-
-    @classmethod
-    def restart_cluster(cls, network_cfg = None, timeout = 10, setup = False):
-        if cls.cluster_mode is False:
-            return
-        if not cls.cluster_instances:
-            return
-
-        if network_cfg is not None:
-            json_data = json.dumps(network_cfg, indent=4)
-            with open('{}/network-cfg.json'.format(cls.host_config_dir), 'w') as f:
-                f.write(json_data)
-
-        cls.cleanup_cluster()
-        if timeout > 0:
-            time.sleep(timeout)
-
-        #start the instances asynchronously
-        cls.start_cluster_async(cls.cluster_instances)
-        time.sleep(5)
-        ##form the cluster as appropriate
-        if setup is True:
-            cls.setup_cluster(cls.cluster_instances)
-        else:
-            for onos in cls.cluster_instances:
-                onos.install_cord_apps(onos.ipaddr)
-
-    @classmethod
-    def cluster_ips(cls):
-        if cls.cluster_mode is False:
-            return []
-        if not cls.cluster_instances:
-            return []
-        ips = [ onos.ipaddr for onos in cls.cluster_instances ]
-        return ips
-
-    @classmethod
-    def cleanup_cluster(cls):
-        if cls.cluster_mode is False:
-            return
-        if not cls.cluster_instances:
-            return
-        for onos in cls.cluster_instances:
-            if onos.exists():
-                onos.kill()
-            onos.running = False
-            onos.remove_container(onos.name, force=True)
-
-    @classmethod
-    def restart_node(cls, node = None, network_cfg = None, timeout = 10):
-        if node is None:
-            cls(restart = True, network_cfg = network_cfg, image = cls.IMAGE, tag = cls.TAG)
-        else:
-            #Restarts a node in the cluster
-            valid_node = filter(lambda onos: node in [ onos.ipaddr, onos.name ], cls.cluster_instances)
-            if valid_node:
-                onos = valid_node.pop()
-                if onos.exists():
-                    onos.kill()
-                onos.remove_container(onos.name, force=True)
-                if timeout > 0:
-                    time.sleep(timeout)
-                print('Restarting ONOS container %s' %onos.name)
-                onos.start(ports = onos.ports, environment = onos.env,
-                           host_config = onos.host_config, volumes = onos.volumes, tty = True,
-                           network = Radius.NETWORK)
-                onos.ipaddr = onos.ip()
-                onos.wait_for_onos_start(onos.ipaddr)
-                onos.install_cord_apps(onos.ipaddr)
-
-    @classmethod
-    def cliEnter(cls, onos_ip = None):
-        retries = 0
-        while retries < 10:
-            cli = OnosCliDriver(controller = onos_ip, connect = True)
-            if cli.handle:
-                return cli
-            else:
-                retries += 1
-                time.sleep(3)
-
-        return None
-
-    @classmethod
-    def cliExit(cls, cli):
-        if cli:
-            cli.disconnect()
-
-    @classmethod
-    def getVersion(cls, onos_ip = None):
-        cli = cls.cliEnter(onos_ip = onos_ip)
-        try:
-            summary = json.loads(cli.summary(jsonFormat = True))
-        except:
-            cls.cliExit(cli)
-            return '1.8.0'
-        cls.cliExit(cli)
-        return summary['version']
-
-    @classmethod
-    def update_cord_apps_version(cls, onos_ip = None):
-        if cls.cord_apps_version_updated == True:
-            return
-        version = cls.getVersion(onos_ip = onos_ip)
-        major = int(version.split('.')[0])
-        minor = int(version.split('.')[1])
-        try:
-            patch = int(version.split('.')[2])
-        except:
-            patch = 0
-        app_version = '1.2-SNAPSHOT'
-        if major > 1:
-            app_version = '3.0-SNAPSHOT'
-        elif major == 1 and minor >= 10:
-            app_version = '3.0-SNAPSHOT'
-            if minor == 10 and patch < 3:
-                app_version = '1.2-SNAPSHOT'
-        for apps in cls.onos_cord_apps:
-            apps[1] = app_version
-        cls.cord_apps_version_updated = True
-
-    @classmethod
-    def install_cord_apps(cls, onos_ip = None):
-        cls.update_cord_apps_version(onos_ip = onos_ip)
-        for app, version,_ in cls.onos_cord_apps:
-            app_file = '{}/{}-{}.oar'.format(cls.cord_apps_dir, app, version)
-            ok, code = OnosCtrl.install_app(app_file, onos_ip = onos_ip)
-            ##app already installed (conflicts)
-            if code in [ 409 ]:
-                ok = True
-            print('ONOS app %s, version %s %s' %(app, version, 'installed' if ok else 'failed to install'))
-            time.sleep(2)
-
-        OnosCtrl.config_olt_component(controller = onos_ip)
-
-    @classmethod
-    def activate_apps(cls, apps, onos_ip = None, deactivate = False):
-        for app in apps:
-            if deactivate is True:
-                OnosCtrl(app, controller = onos_ip).deactivate()
-                time.sleep(2)
-            OnosCtrl(app, controller = onos_ip).activate()
-
-        time.sleep(5)
-
-    @classmethod
-    def activate_cord_apps(cls, onos_ip = None, deactivate = True):
-        cord_apps = map(lambda a: a[2], cls.onos_cord_apps)
-        cls.activate_apps(cord_apps, onos_ip = onos_ip, deactivate = deactivate)
-
-class OnosStopWrapper(Container):
-    def __init__(self, name):
-        super(OnosStopWrapper, self).__init__(name, Onos.IMAGE, tag = Onos.TAG, prefix = Container.IMAGE_PREFIX)
-        if self.exists():
-            self.kill()
-            self.running = False
-        else:
-            if Onos.cluster_mode is True:
-                valid_node = filter(lambda onos: name in [ onos.ipaddr, onos.name ], Onos.cluster_instances)
-                if valid_node:
-                    onos = valid_node.pop()
-                    if onos.exists():
-                        onos.kill()
-                    onos.running = False
-
-class Radius(Container):
-    ports = [ 1812, 1813 ]
-    env = {'TIMEZONE':'America/Los_Angeles',
-           'DEBUG': 'true', 'cert_password':'whatever', 'primary_shared_secret':'radius_password'
-           }
-    host_db_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/radius-config/db')
-    guest_db_dir = os.path.join(os.path.sep, 'opt', 'db')
-    host_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/radius-config/freeradius')
-    guest_config_dir = os.path.join(os.path.sep, 'etc', 'freeradius')
-    start_command = os.path.join(guest_config_dir, 'start-radius.py')
-    host_guest_map = ( (host_db_dir, guest_db_dir),
-                       (host_config_dir, guest_config_dir)
-                       )
-    IMAGE = 'cordtest/radius'
-    NAME = 'cord-radius'
-    NETWORK = 'cord-radius-test'
-    SOCKET_SUBNET = '11.0.0.0/24'
-    SOCKET_SUBNET_PREFIX = '11.0.0'
-    SOCKET_GATEWAY = '11.0.0.1'
-
-    @classmethod
-    def create_network(cls, name = NETWORK):
-        try:
-            Container.create_network(name, subnet = cls.SOCKET_SUBNET, gateway = cls.SOCKET_GATEWAY)
-        except:
-            pass
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = 'candidate',
-                 boot_delay = 10, restart = False, update = False, network = None,
-                 network_disabled = False, olt_config = ''):
-        super(Radius, self).__init__(name, image, prefix = prefix, tag = tag, command = self.start_command)
-        if update is True or not self.img_exists():
-            self.build_image(self.image_name)
-        if restart is True and self.exists():
-            self.kill()
-        else:
-            subscribers = 10
-            if olt_config:
-                port_map, _ = OltConfig(olt_config).olt_port_map()
-                if port_map:
-                    subscribers = port_map['num_ports'] * len(port_map['switch_port_list'])
-            radius_restore_users()
-            radius_add_users(subscribers)
-        if not self.exists():
-            self.remove_container(name, force=True)
-            host_config = self.create_host_config(port_list = self.ports,
-                                                  host_guest_map = self.host_guest_map,
-                                                  privileged = True)
-            volumes = []
-            for _,g in self.host_guest_map:
-                volumes.append(g)
-            self.start(ports = self.ports, environment = self.env,
-                       volumes = volumes,
-                       host_config = host_config, tty = True, network_disabled = network_disabled)
-            if network_disabled is False:
-                Container.connect_to_network(self.name, self.NETWORK)
-            time.sleep(boot_delay)
-
-    @classmethod
-    def build_image(cls, image):
-        print('Building Radius image %s' %image)
-        dockerfile = '''
-FROM hbouvier/docker-radius
-MAINTAINER chetan@ciena.com
-LABEL RUN docker pull hbouvier/docker-radius
-LABEL RUN docker run -it --name cord-radius hbouvier/docker-radius
-RUN apt-get update && \
-    apt-get -y install python python-pexpect strace
-WORKDIR /root
-CMD ["/etc/freeradius/start-radius.py"]
-'''
-        super(Radius, cls).build_image(dockerfile, image)
-        print('Done building image %s' %image)
-
-class Quagga(Container):
-    QUAGGA_CONFIG = ( { 'bridge' : 'quagga-br', 'ip': '10.10.0.3', 'mask' : 16 },
-                      { 'bridge' : 'quagga-br', 'ip': '192.168.10.3', 'mask': 16 },
-                      )
-    ports = [ 179, 2601, 2602, 2603, 2604, 2605, 2606 ]
-    host_quagga_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/quagga-config')
-    guest_quagga_config = '/root/config'
-    quagga_config_file = os.path.join(guest_quagga_config, 'testrib.conf')
-    host_guest_map = ( (host_quagga_config, guest_quagga_config), )
-    IMAGE = 'cordtest/quagga'
-    NAME = 'cord-quagga'
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = 'candidate',
-                 boot_delay = 15, restart = False, config_file = quagga_config_file, update = False,
-                 network = None):
-        super(Quagga, self).__init__(name, image, prefix = prefix, tag = tag, quagga_config = self.QUAGGA_CONFIG)
-        if update is True or not self.img_exists():
-            self.build_image(self.image_name)
-        if restart is True and self.exists():
-            self.kill()
-        if not self.exists():
-            self.remove_container(name, force=True)
-            host_config = self.create_host_config(port_list = self.ports,
-                                                  host_guest_map = self.host_guest_map,
-                                                  privileged = True)
-            volumes = []
-            for _,g in self.host_guest_map:
-                volumes.append(g)
-            self.start(ports = self.ports,
-                       host_config = host_config,
-                       volumes = volumes, tty = True)
-            if network is not None:
-                Container.connect_to_network(self.name, network)
-            print('Starting Quagga on container %s' %self.name)
-            self.execute('{0}/start.sh {1}'.format(self.guest_quagga_config, config_file))
-            time.sleep(boot_delay)
-
-    @classmethod
-    def build_image(cls, image):
-        onos_quagga_ip = Onos.QUAGGA_CONFIG[0]['ip']
-        print('Building Quagga image %s' %image)
-        dockerfile = '''
-FROM ubuntu:14.04
-MAINTAINER chetan@ciena.com
-WORKDIR /root
-RUN useradd -M quagga
-RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
-RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
-RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev pkg-config protobuf-c-compiler
-RUN git clone git://git.savannah.nongnu.org/quagga.git quagga && \
-(cd quagga && git checkout quagga-1.0.20160315 && ./bootstrap.sh && \
-sed -i -r 's,htonl.*?\(INADDR_LOOPBACK\),inet_addr\("{0}"\),g' zebra/zebra_fpm.c && \
-./configure --enable-fpm --disable-doc --localstatedir=/var/run/quagga && make && make install)
-RUN ldconfig
-'''.format(onos_quagga_ip)
-        super(Quagga, cls).build_image(dockerfile, image)
-        print('Done building image %s' %image)
-
-class QuaggaStopWrapper(Container):
-    def __init__(self, name = Quagga.NAME, image = Quagga.IMAGE, tag = 'candidate'):
-        super(QuaggaStopWrapper, self).__init__(name, image, prefix = Container.IMAGE_PREFIX, tag = tag)
-        if self.exists():
-            self.kill()
-
-
-def reinitContainerClients():
-    docker_netns.dckr = Client()
-    Container.dckr = Client()
-
-class Xos(Container):
-    setup_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup')
-    TAG = 'latest'
-    PREFIX = ''
-    host_guest_map = None
-    env = None
-    ports = None
-    volumes = None
-
-    @classmethod
-    def get_cmd(cls, img_name):
-        cmd = cls.dckr.inspect_image(img_name)['Config']['Cmd']
-        return ' '.join(cmd)
-
-    def __init__(self, name, image, prefix = PREFIX, tag = TAG,
-                 boot_delay = 20, restart = False, network_cfg = None, update = False):
-        if restart is True:
-            ##Find the right image to restart
-            running_image = filter(lambda c: c['Names'][0] == '/{}'.format(name), self.dckr.containers())
-            if running_image:
-                image_name = running_image[0]['Image']
-                try:
-                    image = image_name.split(':')[0]
-                    tag = image_name.split(':')[1]
-                except: pass
-        super(Xos, self).__init__(name, image, prefix = prefix, tag = tag)
-        if update is True or not self.img_exists():
-            self.build_image(self.image_name)
-        self.command = self.get_cmd(self.image_name).strip() or None
-        if restart is True and self.exists():
-            self.kill()
-        if not self.exists():
-            self.remove_container(name, force=True)
-            host_config = self.create_host_config(port_list = self.ports,
-                                                  host_guest_map = self.host_guest_map,
-                                                  privileged = True)
-            print('Starting XOS container %s' %self.name)
-            self.start(ports = self.ports, environment = self.env, host_config = host_config,
-                       volumes = self.volumes, tty = True)
-            print('Waiting %d seconds for XOS Base Container to boot' %(boot_delay))
-            time.sleep(boot_delay)
-
-    @classmethod
-    def build_image(cls, image, dockerfile_path, image_target = 'build'):
-        cmd = 'cd {} && make {}'.format(dockerfile_path, image_target)
-        print('Building XOS %s' %image)
-        res = os.system(cmd)
-        print('Done building image %s. Image build %s' %(image, 'successful' if res == 0 else 'failed'))
-        return res
-
-class XosServer(Xos):
-    ports = [8000,9998,9999]
-    NAME = 'xos-server'
-    IMAGE = 'xosproject/xos'
-    BASE_IMAGE = 'xosproject/xos-base'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'xos')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX, tag = TAG,
-                 boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        ##build the base image and then build the server image
-        Xos.build_image(cls.BASE_IMAGE, cls.dockerfile_path, image_target = 'base')
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSynchronizerOpenstack(Xos):
-    ports = [2375,]
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer')
-    NAME = 'xos-synchronizer'
-    IMAGE = 'xosproject/xos-synchronizer-openstack'
-    TAG = 'latest'
-    PREFIX = ''
-    host_guest_map = ( ('/usr/local/share/ca-certificates', '/usr/local/share/ca-certificates'),)
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX,
-                 tag = TAG, boot_delay = 20, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        XosServer.build_image()
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSynchronizerOnboarding(Xos):
-    NAME = 'xos-synchronizer-onboarding'
-    IMAGE = 'xosproject/xos-synchronizer-onboarding'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'onboarding_synchronizer')
-    host_guest_map = ( ('/usr/local/share/ca-certificates', '/usr/local/share/ca-certificates'),)
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX,
-                 tag = TAG, boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        XosSynchronizerOpenstack.build_image()
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSynchronizerOpenvpn(Xos):
-    NAME = 'xos-synchronizer-openvpn'
-    IMAGE = 'xosproject/xos-openvpn'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'openvpn')
-    host_guest_map = ( ('/usr/local/share/ca-certificates', '/usr/local/share/ca-certificates'),)
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX,
-                 tag = TAG, boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        XosSynchronizerOpenstack.build_image()
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosPostgresql(Xos):
-    ports = [5432,]
-    NAME = 'xos-db-postgres'
-    IMAGE = 'xosproject/xos-postgres'
-    TAG = 'latest'
-    PREFIX = ''
-    volumes = ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
-    dockerfile_path = os.path.join(Xos.setup_dir, 'postgresql')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = PREFIX,
-                 tag = TAG, boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSyndicateMs(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-syndicate-ms'
-    IMAGE = 'xosproject/syndicate-ms'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'syndicate-ms')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSyncVtn(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-synchronizer-vtn'
-    IMAGE = 'xosproject/xos-synchronizer-vtn'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer-vtn')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSyncVtr(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-synchronizer-vtr'
-    IMAGE = 'xosproject/xos-synchronizer-vtr'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer-vtr')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSyncVsg(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-synchronizer-vsg'
-    IMAGE = 'xosproject/xos-synchronizer-vsg'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer-vsg')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 10, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-
-class XosSyncOnos(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-synchronizer-onos'
-    IMAGE = 'xosproject/xos-synchronizer-onos'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer-onos')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 30, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-class XosSyncFabric(Xos):
-    ports = [8080,]
-    env = None
-    NAME = 'xos-synchronizer-fabric'
-    IMAGE = 'xosproject/xos-synchronizer-fabric'
-    TAG = 'latest'
-    PREFIX = ''
-    dockerfile_path = os.path.join(Xos.setup_dir, 'synchronizer-fabric')
-
-    def __init__(self, name = NAME, image = IMAGE, prefix = '', tag = TAG,
-                 boot_delay = 30, restart = False, network_cfg = None, update = False):
-        Xos.__init__(self, name, image, prefix, tag, boot_delay, restart, network_cfg, update)
-
-    @classmethod
-    def build_image(cls, image = IMAGE):
-        Xos.build_image(image, cls.dockerfile_path)
-
-if __name__ == '__main__':
-    onos = Onos(boot_delay = 10, restart = True)
diff --git a/src/test/utils/CordLogger.py b/src/test/utils/CordLogger.py
deleted file mode 100644
index 38517a1..0000000
--- a/src/test/utils/CordLogger.py
+++ /dev/null
@@ -1,257 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from OnosLog import OnosLog
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from CordTestUtils import log_test as log
-from onosclidriver import OnosCliDriver
-from OnosCtrl import OnosCtrl
-try:
-    from docker import APIClient as Client
-except:
-    from docker import Client
-from CordContainer import *
-import json
-import requests
-import unittest
-import os
-import time
-import warnings
-
-def get_controller_names(controllers):
-        controller_names = [ 'cord-onos' if controllers.index(c) == 0 else 'cord-onos-{}'.format(controllers.index(c)+1) for c in controllers ]
-        return controller_names
-
-def get_controller_map(controllers):
-        controller_map = ( ('cord-onos' if controllers.index(c) == 0 else 'cord-onos-{}'.format(controllers.index(c)+1),c) for c in controllers )
-        return dict(controller_map)
-
-class CordLogger(unittest.TestCase):
-
-    controllers = os.getenv('ONOS_CONTROLLER_IP', '').split(',')
-    controller_names = get_controller_names(controllers)
-    controller_map = get_controller_map(controllers)
-    cliSessions = {}
-    onosLogLevel = 'INFO'
-    curLogLevel = onosLogLevel
-    testLogLevel = os.getenv('LOG_LEVEL', onosLogLevel)
-    setup_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup')
-    archive_dir = os.path.join(setup_dir, 'test_logs')
-    onos_data_dir = os.path.join(setup_dir, 'cord-onos-data')
-
-    def __init__(self, *args, **kwargs):
-        warnings.simplefilter('ignore')
-        super(CordLogger, self).__init__(*args, **kwargs)
-
-    @classmethod
-    def cliSessionEnter(cls):
-        try:
-            for controller in cls.controllers:
-                if not controller:
-                    continue
-                retries = 0
-                while retries < 30:
-                    cli = OnosCliDriver(controller = controller, connect = True)
-                    if cli.handle:
-                        cls.cliSessions[controller] = cli
-                        break
-                    else:
-                        retries += 1
-                        time.sleep(2)
-        except:
-            pass
-
-    @classmethod
-    def cliSessionExit(cls):
-        try:
-            for controller, cli in cls.cliSessions.items():
-                if cli:
-                    cli.disconnect()
-        except:
-            pass
-
-    def setUp(self):
-        '''Read the log buffer'''
-        self.logSet()
-        try:
-            onosLog = OnosLog()
-            st, output = onosLog.get_log()
-        except: pass
-
-    def tearDown(self):
-        '''Dump the log buffer for ERRORS/warnings'''
-        #reset the log level back to default log level after a test
-        self.logSet(level = self.onosLogLevel)
-        try:
-            onosLog = OnosLog()
-            st, output = onosLog.get_log( ('ERROR','WARN') )
-            if st and output:
-                log.info('\nTest %s has errors and warnings\n' %self._testMethodName)
-                log.info('%s' %output)
-            else:
-                log.info('\nTest %s has no errors and warnings in the logs' %self._testMethodName)
-        except: pass
-        try:
-            self.archive_results(self._testMethodName)
-        except: pass
-
-    @classmethod
-    def archive_results(cls, testName, controllers = None, iteration = None, archive_partition = False):
-        if not os.path.exists(cls.onos_data_dir):
-            return cls.archive_results_unshared(testName, controllers = controllers, iteration = iteration)
-        if not os.path.exists(cls.archive_dir):
-            os.mkdir(cls.archive_dir)
-        if controllers is None:
-            controllers = cls.controllers
-            controller_map = cls.controller_map
-        else:
-            controller_map = get_controller_map(controllers)
-
-        iteration_str = '' if iteration is None else '_{}'.format(iteration)
-        if archive_partition is False:
-            archive_target = 'log'
-            tar_options = ''
-        else:
-            archive_target = ''
-            tar_options = '--exclude=cache --exclude=tmp'
-
-        for c in controller_map.keys():
-            archive_file = os.path.join(cls.archive_dir,
-                                        'logs_{}_{}{}.tar.gz'.format(controller_map[c], testName, iteration_str))
-            archive_path = os.path.join(cls.setup_dir, '{}-data'.format(c), archive_target)
-            cmd = 'cd {} && tar cvzf {} . {}'.format(archive_path, archive_file, tar_options)
-            try:
-                os.system(cmd)
-            except: pass
-
-    @classmethod
-    def archive_results_unshared(cls, testName, controllers = None, iteration = None, cache_result = False):
-        log_map = {}
-        if controllers is None:
-            controllers = cls.controllers
-        else:
-            if type(controllers) in [ str, unicode ]:
-                controllers = [ controllers ]
-        try:
-            for controller in controllers:
-                onosLog = OnosLog(host = controller)
-                st, output = onosLog.get_log(cache_result = cache_result)
-                log_map[controller] = (st, output)
-        except:
-            return
-
-        if not os.path.exists(cls.archive_dir):
-            os.mkdir(cls.archive_dir)
-        for controller, results in log_map.items():
-            st, output = results
-            if st and output:
-                iteration_str = '' if iteration is None else '_{}'.format(iteration)
-                archive_file = os.path.join(cls.archive_dir,
-                                            'logs_{}_{}{}'.format(controller, testName, iteration_str))
-                archive_cmd = 'gzip -9 -f {}'.format(archive_file)
-                if os.access(archive_file, os.F_OK):
-                    os.unlink(archive_file)
-                with open(archive_file, 'w') as fd:
-                    fd.write(output)
-                try:
-                    os.system(archive_cmd)
-                except: pass
-
-    @classmethod
-    def logSet(cls, level = None, app = 'org.onosproject', controllers = None, forced = False):
-        #explicit override of level is allowed to reset log levels
-        if level is None:
-            level = cls.testLogLevel
-        #if we are already at current/ONOS log level, there is nothing to do
-        if forced is False and level == cls.curLogLevel:
-            return
-        if controllers is None:
-            controllers = cls.controllers
-        else:
-            if type(controllers) in [str, unicode]:
-                controllers = [ controllers ]
-        cls.cliSessionEnter()
-        try:
-            for controller in controllers:
-                if cls.cliSessions.has_key(controller):
-                    cls.cliSessions[controller].logSet(level = level, app = app)
-            cls.curLogLevel = level
-        except:
-            pass
-        cls.cliSessionExit()
-
-    @classmethod
-    def stat_option(cls, stat = None, serverDetails = None):
-        # each stat option we can do some specific functions
-        if stat is None:
-           stat = cls.statOptionsList
-        if serverDetails is None:
-           serverDetails = cls.serverOptionsList
-        stat_choice = 'COLLECTD'
-        test_name = cls.testHostName
-        test_image = 'cordtest/nose'
-        if stat_choice in stat:
-           onos_ctrl = OnosCtrl('org.onosproject.cpman')
-           status, _ = onos_ctrl.activate()
-           if serverDetails is '':
-              ## default Test Container is used to install CollectD
-              pass
-           elif serverDetails in 'NEW':
-                test_image = 'cord-test/exserver'
-                test_name ='cord-collectd'
-           else:
-               pass
-               # cls.connect_server(serverDetails)
-               ## TO-DO for already up and running server, install collectd agent etc...
-           cls.start_collectd_agent_in_server(name = test_name, image = test_image)
-           for controller in cls.controllers:
-               if not controller:
-                  continue
-               url_mem_stats =  'http://%s:8181/onos/cpman/controlmetrics/memory_metrics'%(controller)
-               url_cpu_stats =  'http://%s:8181/onos/cpman/controlmetrics/cpu_metrics'%(controller)
-               auth = ('karaf', 'karaf')
-               cls.collectd_agent_metrics(controller, auth, url = url_cpu_stats)
-               cls.collectd_agent_metrics(controller, auth, url = url_mem_stats)
-        return
-
-
-    @classmethod
-    def collectd_agent_metrics(cls,controller=None, auth =None, url = None):
-        '''This function is getting rules from ONOS with json format'''
-        if url:
-           resp = requests.get(url, auth = auth)
-           log.info('Collectd agent has provided metrics via ONOS controller, url = %s \nand status = %s' %(url,resp.json()))
-        return resp
-
-
-    @classmethod
-    def start_collectd_agent_in_server(cls, name = None, image = None):
-        container_cmd_exec = Container(name = name, image = image)
-        tty = False
-        dckr = Client()
-        cmd =  'sudo /etc/init.d/collectd start'
-        i = container_cmd_exec.execute(cmd = cmd, tty= tty, stream = True)
-        return
-
-    @classmethod
-    def disable_onos_apps(cls, stat = None, app = None):
-        stat_choice = 'COLLECTD'
-        if stat is None:
-           stat = cls.statOptionsList
-        if stat_choice in stat:
-            onos_ctrl = OnosCtrl('org.onosproject.cpman')
-            status, _ = onos_ctrl.deactivate()
diff --git a/src/test/utils/CordSubscriberUtils.py b/src/test/utils/CordSubscriberUtils.py
deleted file mode 100644
index 72cc9b5..0000000
--- a/src/test/utils/CordSubscriberUtils.py
+++ /dev/null
@@ -1,409 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import sys
-import time
-from nose.tools import *
-from CordTestUtils import log_test as log
-from OnosCtrl import OnosCtrl
-
-class XosUtils(object):
-
-    head_node = os.getenv('HEAD_NODE', 'head1')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    CONTROLLER_PORT = '9000'
-    our_path = os.path.dirname(os.path.realpath(__file__))
-    cord_api_path = os.path.join(our_path, '..', 'cord-api')
-    framework_path = os.path.join(cord_api_path, 'Framework')
-    utils_path = os.path.join(framework_path, 'utils')
-    sys.path.append(utils_path)
-    sys.path.append(framework_path)
-
-    @classmethod
-    def getCredentials(cls):
-        onos_cfg = OnosCtrl.get_config()
-        if onos_cfg is None:
-            return None
-        if 'apps' in onos_cfg and \
-           'org.opencord.vtn' in onos_cfg['apps'] and \
-           'cordvtn' in onos_cfg['apps']['org.opencord.vtn'] and \
-           'xos' in onos_cfg['apps']['org.opencord.vtn']['cordvtn']:
-            xos_cfg = onos_cfg['apps']['org.opencord.vtn']['cordvtn']['xos']
-            endpoint = xos_cfg['endpoint']
-            user = xos_cfg['user']
-            password = xos_cfg['password']
-            xos_endpoints = endpoint.split(':')
-            xos_host = xos_endpoints[0]
-            xos_port = xos_endpoints[1]
-            #log.info('xos_host: %s, port: %s, user: %s, password: %s' %(xos_host, xos_port, user, password))
-            return dict(host = xos_host, port = xos_port, user = user, password = password)
-
-        return None
-
-    @classmethod
-    def getRestApi(cls):
-        try:
-            from restApi import restApi
-            restApiXos = restApi()
-            xos_credentials = cls.getCredentials()
-            if xos_credentials is None:
-                restApiXos.controllerIP = cls.HEAD_NODE
-                restApiXos.controllerPort = cls.CONTROLLER_PORT
-            else:
-                restApiXos.controllerIP = xos_credentials['host']
-                restApiXos.controllerPort = xos_credentials['port']
-                restApiXos.user = xos_credentials['user']
-                restApiXos.password = xos_credentials['password']
-
-            return restApiXos
-        except:
-            return None
-
-    def __init__(self):
-        self.restApi = self.getRestApi()
-
-    '''
-    @method search_dictionary
-    @Description: Searches for a key in the provided nested dictionary
-    @params: input_dict = dictionary to be searched
-             search_key = name of the key to be searched for
-    returns two values: search_key value and status of the search.
-             True if found (False when not found)
-
-    '''
-    def search_dictionary(self, input_dict, search_key):
-        input_keys = input_dict.keys()
-        key_value = ''
-        found = False
-        for key in input_keys:
-            if key == search_key:
-               key_value = input_dict[key]
-               found = True
-               break
-            elif type(input_dict[key]) == dict:
-                 key_value, found = self.search_dictionary(input_dict[key],search_key)
-                 if found == True:
-                    break
-            elif type(input_dict[key]) == list:
-                 if not input_dict[key]:
-                    found = False
-                    break
-                 for item in input_dict[key]:
-                     if isinstance(item, dict):
-                        key_value, found = self.search_dictionary(item, search_key)
-                        if found == True:
-                           break
-        return key_value,found
-
-    '''
-    @method getFieldValueFromDict
-    @params : search_dict - Dictionary to be searched
-             field - Key to be searched for (ex: account_num)
-    @Returns: Returns the value of the Key that was provided
-    '''
-    def getFieldValueFromDict(self,search_dict, field):
-        results = ''
-        found = False
-        input_keys = search_dict.keys()
-        for key in input_keys:
-            print "key...", key
-            if key == field:
-               results = search_dict[key]
-               if not results:
-                  found = True
-                  break
-            elif type(search_dict[key]) == dict:
-                 results, found = self.search_dictionary(search_dict[key],field)
-                 if found == True:
-                    break
-            elif type(search_dict[key]) == list:
-                 if not search_dict[key]:
-                    found = False
-                    continue
-                 for item in search_dict[key]:
-                     if isinstance(item, dict):
-                        results, found = self.search_dictionary(item, field)
-                        if found == True:
-                           break
-            if results:
-               break
-
-        return results
-
-    def getSubscriberId(self, subscriberList, account_num):
-        subscriberId = 0
-        subscriberInfo = None
-        for subscriber in subscriberList:
-            if str(subscriber['service_specific_id']) == str(account_num):
-                subscriberId = self.getFieldValueFromDict(subscriber, 'id')
-                subscriberInfo = subscriber
-                break
-        return subscriberInfo, subscriberId
-
-    def getVoltId(self, result, subInfo, s_tag = None, c_tag = None):
-        subscribed_link_ids_list = self.getFieldValueFromDict(subInfo,
-                                                              'subscribed_links_ids')
-        if len(subscribed_link_ids_list) > 0:
-            subscribed_link_ids = subscribed_link_ids_list[0]
-            service_link = self.restApi.ApiChameleonGet('CH_CORE_SERVICELINK',
-                                                        subscribed_link_ids)
-            assert_not_equal(service_link, None)
-            provider_service_instance_id = service_link.get('provider_service_instance_id',
-                                                            None)
-            assert_not_equal(provider_service_instance_id, None)
-            return provider_service_instance_id
-
-        #find the tenant for the s_tag/c_tag
-        if s_tag is None or c_tag is None:
-            return None
-
-        if result is None:
-            result = self.restApi.ApiGet('VOLT_TENANT')
-            result = result['items']
-
-        tenant = filter(lambda t: int(t['s_tag']) == int(s_tag) and \
-                        int(t['c_tag']) == int(c_tag), result)
-        if not tenant:
-            return None
-
-        return tenant[0]['id']
-
-    def getProviderInstance(self, info):
-        return info['id']
-        provided_link_ids_list = self.getFieldValueFromDict(info,
-                                                            'provided_links_ids')
-        assert_not_equal(provided_link_ids_list, None)
-        assert_not_equal(len(provided_link_ids_list), 0)
-        provided_link_ids = provided_link_ids_list[0]
-        service_link = self.restApi.ApiChameleonGet('CH_CORE_SERVICELINK',
-                                                    provided_link_ids)
-        if service_link is None:
-            return None
-        provider_service_instance_id = service_link.get('provider_service_instance_id',
-                                                        None)
-        assert_not_equal(provider_service_instance_id, None)
-        return provider_service_instance_id
-
-    def linkTenant(self, subId, tenant_info):
-        result = self.restApi.ApiGet('VOLT_TENANT')['items']
-        tenant = None
-        for volt in result:
-            if str(volt['c_tag']) == str(tenant_info['c_tag']):
-                tenant = volt
-                break
-        assert_not_equal(tenant, None)
-        volt_id = self.getFieldValueFromDict(tenant, 'id')
-        provided_links_ids_list = self.getFieldValueFromDict(tenant,
-                                                             'provided_links_ids')
-        assert_not_equal( len(provided_link_ids_list), 0)
-        provided_link_ids = provided_link_ids_list[0]
-        subscribed_link_ids_list = self.getFieldValueFromDict(tenant,
-                                                              'subscribed_links_ids')
-        assert_not_equal(len(subscribed_link_ids_list), 0)
-        subscribed_link_ids = subscribed_link_ids_list[0]
-        service_link = self.restApi.ApiChameleonGet('CH_CORE_SERVICELINK',
-                                                    provided_link_ids)
-        assert_not_equal(service_link, None)
-        provider_service_instance_id = service_link.get('provider_service_instance_id',
-                                                        None)
-        assert_not_equal(provider_service_instance_id, None)
-        service_dict = dict(subscriber_service_instance_id = subId)
-        result = self.restApi.ApiChameleonPut('CH_CORE_SERVICELINK',
-                                              service_dict,
-                                              provided_link_ids)
-        assert_equal(result, True)
-        return provider_service_instance_id
-        # service_link_dict = self.restApi.ApiChameleonGet('CH_CORE_SERVICELINK',
-        #                                                  subscribed_link_ids)
-        # assert_not_equal(service_link_dict, None)
-        # vsg_tenant = service_link_dict.get('provider_service_instance_id', None)
-        # assert_not_equal(vsg_tenant, None)
-        # vsg_result = self.restApi.ApiChameleonGet('VSG_TENANT',
-        #                                           vsg_tenant)
-        # assert_not_equal(vsg_result, None)
-        # vsg_instance = vsg_result.get('instance_id', None)
-        # assert_not_equal(vsg_instance, None)
-        # instance_result = self.restApi.ApiChameleonGet('CH_CORE_INSTANCES',
-        #                                                vsg_instance)
-        # assert_equal(instance_result, True)
-
-    def subscriberCreate(self, subscriber_info, volt_subscriber_info):
-        subId = ''
-        try:
-            result = self.restApi.ApiPost('VOLT_SUBSCRIBER', subscriber_info)
-            assert_equal(result, True)
-            result = self.restApi.ApiGet('VOLT_SUBSCRIBER')
-            assert_not_equal(result, None)
-            result = result['items']
-            _, subId = self.getSubscriberId(result,
-                                            volt_subscriber_info['service_specific_id'])
-            assert_not_equal(subId, '0')
-            log.info('Subscriber ID for account num %s = %s' %(str(volt_subscriber_info['service_specific_id']), subId))
-            volt_tenant = volt_subscriber_info['voltTenant']
-            result = self.restApi.ApiPost('VOLT_TENANT', volt_tenant)
-            assert_equal(result, True)
-            volt_id = self.linkTenant(subId, volt_tenant)
-            log.info('Subscriber create with ctag %s, stag %s, volt id %s' %(str(volt_tenant['c_tag']),
-                                                                             str(volt_tenant['s_tag']),
-                                                                             str(volt_id)))
-        finally:
-            return subId
-
-    def subscriberDelete(self, account_num, s_tag = None, c_tag = None, subId = '', voltId = ''):
-        result = self.restApi.ApiGet('VOLT_SUBSCRIBER')
-        assert_not_equal(result, None)
-        result = result['items']
-        if not subId:
-            #get the subscriber id first
-            subInfo, subId = self.getSubscriberId(result, account_num)
-            assert_not_equal(subId, '0')
-        else:
-            subInfo, currentSubId = self.getSubscriberId(result, account_num)
-            assert_not_equal(currentSubId, '0')
-            #assert_equal(subId, currentSubId)
-            subId = self.getFieldValueFromDict(subInfo, 'id')
-        if not voltId:
-            #get the volt id for the subscriber
-            result = self.restApi.ApiGet('VOLT_TENANT')
-            assert_not_equal(result, None)
-            result = result['items']
-            voltId = self.getVoltId(result, subInfo, s_tag = s_tag, c_tag = c_tag)
-            assert_not_equal(voltId, None)
-        log.info('Deleting VOLT Tenant ID %s for subscriber %s' %(voltId, subId))
-        status = self.restApi.ApiChameleonDelete('VOLT_TENANT', voltId)
-        assert_equal(status, True)
-        log.info('Deleting subscriber ID %s for account num %s' %(subId, str(account_num)))
-        status = self.restApi.ApiChameleonDelete('VOLT_SUBSCRIBER', subId)
-        assert_equal(status, True)
-
-    def subscriberId(self, account_num):
-        result = self.restApi.ApiGet('VOLT_SUBSCRIBER')
-        assert_not_equal(result, None)
-        result = result['items']
-        _, subId = self.getSubscriberId(result, account_num)
-        return subId
-
-class CordSubscriberUtils(object):
-
-    SUBSCRIBER_ACCOUNT_NUM = 100
-    SUBSCRIBER_S_TAG = 500
-    SUBSCRIBER_C_TAG = 500
-    SUBSCRIBERS_PER_S_TAG = 8
-
-    def __init__(self,
-                 num_subscribers,
-                 account_num = SUBSCRIBER_ACCOUNT_NUM,
-                 s_tag = SUBSCRIBER_S_TAG,
-                 c_tag = SUBSCRIBER_C_TAG,
-                 subscribers_per_s_tag = SUBSCRIBERS_PER_S_TAG):
-        self.num_subscribers = num_subscribers
-        self.account_num = account_num
-        self.s_tag = s_tag
-        self.c_tag = c_tag
-        self.subscribers_per_s_tag = subscribers_per_s_tag
-        self.subscriber_map = {}
-        self.tenant_map = {}
-        self.subscriber_info = self.getConfig()
-        self.volt_subscriber_info = self.getVoltConfig()
-        self.xos = XosUtils()
-
-    def getCredentials(self, subId):
-        """Generate our own account num, s_tag and c_tags"""
-        if subId in self.subscriber_map:
-            return self.subscriber_map[subId]
-        account_num = self.account_num
-        self.account_num += 1
-        s_tag, c_tag = self.s_tag, self.c_tag
-        self.c_tag += 1
-        if self.c_tag % self.subscribers_per_s_tag == 0:
-            self.s_tag += 1
-        self.subscriber_map[subId] = account_num, s_tag, c_tag
-        self.tenant_map[account_num] = (s_tag, c_tag)
-        return self.subscriber_map[subId]
-
-    def getConfig(self):
-        features =  {
-            'cdn_enable': True,
-            'uplink_speed': 1000000000,
-            'downlink_speed': 1000000000,
-            'enable_uverse': True,
-            'status': 'enabled'
-        }
-        subscriber_map = []
-        for i in xrange(self.num_subscribers):
-            subId = 'sub{}'.format(i)
-            account_num, _, _ = self.getCredentials(subId)
-            identity = { 'service_specific_id' : str(account_num),
-                         'name' : 'My House {}'.format(i)
-                         }
-            sub_data = [ (k, v) for d in (features, identity) \
-                         for k, v in d.iteritems() ]
-            sub_info = dict(sub_data)
-            subscriber_map.append(sub_info)
-
-        return subscriber_map
-
-    def getVoltInfo(self, account_num):
-        num = int(account_num)
-        if num in self.tenant_map:
-            return self.tenant_map[num]
-        return None, None
-
-    def getVoltConfig(self):
-        voltSubscriberMap = []
-        for i in xrange(self.num_subscribers):
-            subId = 'sub{}'.format(i)
-            account_num, s_tag, c_tag = self.getCredentials(subId)
-            voltSubscriberInfo = {}
-            voltSubscriberInfo['voltTenant'] = dict(s_tag = str(s_tag),
-                                                    c_tag = str(c_tag))
-            voltSubscriberInfo['service_specific_id'] = account_num
-            voltSubscriberMap.append(voltSubscriberInfo)
-
-        return voltSubscriberMap
-
-    def getVoltId(self, subInfo):
-        s_tag, c_tag = self.getVoltInfo(subInfo['service_specific_id'])
-        return self.xos.getVoltId(None, subInfo, s_tag = s_tag, c_tag = c_tag)
-
-    def getProviderInstance(self, tenant_info):
-        return self.xos.getProviderInstance(tenant_info)
-
-    def subscriberCreate(self, index, subscriber_info = None, volt_subscriber_info = None):
-        if subscriber_info is None:
-            subscriber_info = self.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        log.info('Creating tenant with s_tag: %d, c_tag: %d' %(s_tag, c_tag))
-        subId = self.xos.subscriberCreate(subscriber_info, volt_subscriber_info)
-        return subId
-
-    def subscriberDelete(self, index, subId = '', voltId = '', subscriber_info = None, volt_subscriber_info = None):
-        if subscriber_info is None:
-            subscriber_info = self.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        log.info('Deleting tenant with s_tag: %d, c_tag: %d' %(s_tag, c_tag))
-        self.xos.subscriberDelete(volt_subscriber_info['service_specific_id'], s_tag = s_tag, c_tag = c_tag, subId = subId, voltId = voltId)
-
-    def subscriberId(self, index):
-        volt_subscriber_info = self.volt_subscriber_info[index]
-        return self.xos.subscriberId(volt_subscriber_info['service_specific_id'])
diff --git a/src/test/utils/CordTestBase.py b/src/test/utils/CordTestBase.py
deleted file mode 100644
index e6b95af..0000000
--- a/src/test/utils/CordTestBase.py
+++ /dev/null
@@ -1,55 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-class CordTester(object):
-
-    def __init__(self, fsmTable, stopState, stateTable = None, eventTable = None):
-        self.fsmTable = fsmTable
-        self.stopState = stopState
-        self.stateTable = stateTable
-        self.eventTable = eventTable
-        self.currentState = None
-        self.currentEvent = None
-        self.nextState = None
-        self.nextEvent = None
-
-    def runTest(self):
-        while self.currentState != self.stopState and self.currentEvent != None:
-            if self.stateTable and self.eventTable:
-                print('Current state: %s, Current event: %s' %(self.stateTable.toStr(self.currentState),
-                                                               self.eventTable.toStr(self.currentEvent)))
-            key = (self.currentState, self.currentEvent)
-            (actions, nextState) = self.fsmTable[key]
-            if actions:
-                for a in actions:
-                    a()
-            self.currentState = nextState if self.nextState is None else self.nextState
-            self.currentEvent = self.nextEvent
diff --git a/src/test/utils/CordTestConfig.py b/src/test/utils/CordTestConfig.py
deleted file mode 100644
index 34dbb85..0000000
--- a/src/test/utils/CordTestConfig.py
+++ /dev/null
@@ -1,192 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import inspect
-import unittest
-import json
-import os
-import time
-from nose.tools import assert_not_equal
-from nose.plugins import Plugin
-from CordTestUtils import log_test as log
-from CordTestUtils import running_on_pod
-from VolthaCtrl import voltha_setup, voltha_teardown, VolthaService, VolthaCtrl
-from OnosCtrl import OnosCtrl
-from SSHTestAgent import SSHTestAgent
-log.setLevel('INFO')
-
-class CordTestConfigRestore(Plugin):
-    name = 'cordTestConfigRestore'
-    context = None
-    restore_methods = ('configRestore', 'config_restore',)
-
-    def options(self, parser, env=os.environ):
-        super(CordTestConfigRestore, self).options(parser, env = env)
-
-    def configure(self, options, conf):
-        self.enabled = True
-
-    #just save the test case context on start
-    def startContext(self, context):
-        if inspect.isclass(context) and issubclass(context, unittest.TestCase):
-            if context.__name__.endswith('exchange'):
-                self.context = context
-
-    #reset the context on exit
-    def stopContext(self, context):
-        if inspect.isclass(context) and issubclass(context, unittest.TestCase):
-            if context.__name__.endswith('exchange'):
-                self.context = None
-
-    def doFailure(self, test, exception):
-        if self.context:
-            log.info('Inside test case failure for test: %s' %self.context.__name__)
-            for restore_method in self.restore_methods:
-                if hasattr(self.context, restore_method):
-                    method = getattr(self.context, restore_method)
-                    #check only for class/static methods
-                    if method.__self__ is self.context:
-                        method()
-                        break
-
-    def addError(self, test, exception):
-        self.doFailure(test, exception)
-
-    def addFailure(self, test, exception):
-        self.doFailure(test, exception)
-
-def get_test_class(module):
-    class_test = None
-    for name, obj in inspect.getmembers(module):
-        if inspect.isclass(obj) and issubclass(obj, unittest.TestCase):
-            if obj.__name__.endswith('exchange'):
-                class_test = obj
-                break
-            else:
-                class_test = obj
-
-    return class_test
-
-def setup_module(module):
-    class_test = get_test_class(module)
-    assert_not_equal(class_test, None)
-    module_name = module.__name__.split('.')[-1]
-    cfg = '{}.json'.format(module_name)
-    module_config = os.path.join(os.path.dirname(module.__file__), cfg)
-    if os.access(module_config, os.F_OK):
-        with open(module_config) as f:
-            json_data = json.load(f)
-            for k, v in json_data.iteritems():
-                setattr(class_test, k, v)
-
-    #check for voltha and configure as appropriate
-    voltha_attrs = dict(host = VolthaService.DOCKER_HOST_IP,
-                        ponsim_host = VolthaService.PONSIM_HOST,
-                        rest_port = VolthaCtrl.REST_PORT,
-                        config_fake = False,
-                        olt_type = 'ponsim_olt',
-                        olt_mac = '00:0c:e2:31:12:00',
-                        olt_ip = None,
-                        uplink_vlan_map = { 'of:0000000000000001' : '222' },
-                        uplink_vlan_start = 333,
-                        teardown = True,
-                        )
-    voltha_enabled = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
-    voltha_configure = True
-
-    olt_switch_map = {}
-
-    if hasattr(class_test, 'VOLTHA_AUTO_CONFIGURE'):
-        voltha_configure = getattr(class_test, 'VOLTHA_AUTO_CONFIGURE')
-
-    tagged_traffic = False
-    if hasattr(class_test, 'TAGGED_TRAFFIC'):
-        tagged_traffic = getattr(class_test, 'TAGGED_TRAFFIC')
-
-    if hasattr(class_test, 'VOLTHA_HOST'):
-        #update the voltha host ip based on chameleon IP for rest interface
-        rest_interface = VolthaService.get_ip('chameleon')
-        if rest_interface:
-            log.info('Updating VOLTHA_HOST IP to %s' %rest_interface)
-            setattr(class_test, 'VOLTHA_HOST', rest_interface)
-
-    if voltha_enabled and voltha_configure:
-        for k,v in voltha_attrs.iteritems():
-            voltha_attr = 'VOLTHA_{}'.format(k.upper())
-            if hasattr(class_test, voltha_attr):
-                v = getattr(class_test, voltha_attr)
-                voltha_attrs[k] = v
-            else:
-                setattr(class_test, voltha_attr, v)
-        ret = voltha_setup(**voltha_attrs)
-        if ret is not None:
-            #setup the stage to drop voltha on the way out
-            setattr(class_test, 'voltha_ctrl', ret[0])
-            setattr(class_test, 'voltha_device', ret[1])
-            setattr(class_test, 'voltha_switch_map', ret[2])
-            olt_switch_map = ret[2]
-            voltha_driver_configured = ret[3]
-            setattr(class_test, 'voltha_preconfigured', voltha_driver_configured)
-            if voltha_driver_configured:
-                setattr(class_test, 'VOLTHA_TEARDOWN', False)
-
-    #load the sadis and aaa config
-    OnosCtrl.sadis_load_config(olt_switch_map = olt_switch_map, tagged_traffic = tagged_traffic)
-    OnosCtrl.aaa_load_config()
-    #OnosCtrl('org.opencord.aaa').deactivate()
-    #time.sleep(3)
-    #OnosCtrl('org.opencord.aaa').activate()
-    #time.sleep(3)
-    if voltha_enabled is False:
-        OnosCtrl.config_olt_access(VolthaCtrl.UPLINK_VLAN_START)
-
-def teardown_module(module):
-    class_test = get_test_class(module)
-    if class_test is None:
-        return
-    if not hasattr(class_test, 'voltha_ctrl') or \
-       not hasattr(class_test, 'voltha_device') or \
-       not hasattr(class_test, 'voltha_switch_map') or \
-       not hasattr(class_test, 'voltha_preconfigured') or \
-       not hasattr(class_test, 'VOLTHA_TEARDOWN'):
-        return
-    voltha_ctrl = getattr(class_test, 'voltha_ctrl')
-    voltha_device = getattr(class_test, 'voltha_device')
-    voltha_switch_map = getattr(class_test, 'voltha_switch_map')
-    voltha_preconfigured = getattr(class_test, 'voltha_preconfigured')
-    voltha_tear = getattr(class_test, 'VOLTHA_TEARDOWN')
-    if voltha_preconfigured is False and voltha_tear is True:
-        voltha_teardown(voltha_ctrl, voltha_device, voltha_switch_map)
-
-def running_on_ciab():
-    if running_on_pod() is False:
-        return False
-    head_node = os.getenv('HEAD_NODE', 'prod')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    agent = SSHTestAgent(host = HEAD_NODE, user = 'ubuntu', password = 'ubuntu')
-    #see if user ubuntu works
-    st, output = agent.run_cmd('sudo virsh list')
-    if st is False and output is not None:
-        #we are on real pod
-        return False
-
-    #try vagrant
-    agent = SSHTestAgent(host = HEAD_NODE, user = 'vagrant', password = 'vagrant')
-    st, output = agent.run_cmd('sudo virsh list')
-    if st is True and output is not None:
-        return True
-
-    return False
diff --git a/src/test/utils/CordTestServer.py b/src/test/utils/CordTestServer.py
deleted file mode 100644
index 6e1ae31..0000000
--- a/src/test/utils/CordTestServer.py
+++ /dev/null
@@ -1,396 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from CordContainer import Container, Onos, OnosStopWrapper, OnosCord, OnosCordStopWrapper, Quagga, QuaggaStopWrapper, Radius, reinitContainerClients
-from OltConfig import OltConfig
-from OnosCtrl import OnosCtrl
-from CordTestUtils import get_controller
-from EapolAAA import get_radius_macs, get_radius_networks
-from nose.tools import nottest
-from SimpleXMLRPCServer import SimpleXMLRPCServer
-from resource import getrlimit, RLIMIT_NOFILE
-import daemon
-import xmlrpclib
-import os
-import signal
-import json
-import time
-import threading
-
-##Server to handle container restart/stop requests from test container.
-##Used now to restart ONOS from vrouter test container
-
-CORD_TEST_HOST = '172.17.0.1'
-CORD_TEST_PORT = 25000
-
-class CordTestServer(object):
-
-    onos_cord = None
-
-    def __ovs_flow_add(self, in_port = 1, dl_vlan = 0):
-        if dl_vlan:
-            cmd = 'ovs-ofctl -O OpenFlow13 add-flow br-int in_port=%d,dl_vlan=%d,actions=CONTROLLER:65535' %(in_port, dl_vlan)
-        else:
-            cmd = 'ovs-ofctl -O OpenFlow13 add-flow br-int in_port=%d,actions=CONTROLLER:65535' %(in_port)
-        os.system(cmd)
-        return 'DONE'
-
-    def ovs_flow_add(self, kwargs):
-        return self.__ovs_flow_add(**kwargs)
-
-    def __restart_onos(self, node = None, config = None, timeout = 10):
-        if self.onos_cord:
-            onos_config = '{}/network-cfg.json'.format(self.onos_cord.onos_config_dir)
-        else:
-            onos_config = '{}/network-cfg.json'.format(Onos.host_config_dir)
-        if config is None:
-            try:
-                os.unlink(onos_config)
-            except:
-                pass
-        print('Restarting ONOS')
-        if self.onos_cord:
-            self.onos_cord.start(restart = True, network_cfg = config)
-        else:
-            Onos.restart_node(node = node, network_cfg = config, timeout = timeout)
-        return 'DONE'
-
-    def restart_onos(self, kwargs):
-        return self.__restart_onos(**kwargs)
-
-    def __shutdown_onos(self, node = None):
-        if node is None:
-            node = Onos.NAME
-        OnosStopWrapper(node)
-        return 'DONE'
-
-    def shutdown_onos(self, kwargs):
-        return self.__shutdown_onos(**kwargs)
-
-    def __restart_cluster(self, config = None, timeout = 10, setup = False):
-        Onos.restart_cluster(network_cfg = config, timeout = timeout, setup = setup)
-        return 'DONE'
-
-    def restart_cluster(self, kwargs):
-        return self.__restart_cluster(**kwargs)
-
-    def __add_cluster_onos(self, count = 1, config = None):
-        Onos.add_cluster(count = count, network_cfg = config)
-        return 'DONE'
-
-    def add_cluster_onos(self, kwargs):
-        return self.__add_cluster_onos(**kwargs)
-
-    def __restart_quagga(self, config = None, boot_delay = 30 ):
-        config_file = Quagga.quagga_config_file
-        if config is not None:
-            quagga_config = '{}/testrib_gen.conf'.format(Quagga.host_quagga_config)
-            config_file = '{}/testrib_gen.conf'.format(Quagga.guest_quagga_config)
-            with open(quagga_config, 'w+') as fd:
-                fd.write(str(config))
-        print('Restarting QUAGGA with config file %s, delay %d' %(config_file, boot_delay))
-        Quagga(prefix = Container.IMAGE_PREFIX, restart = True, config_file = config_file, boot_delay = boot_delay)
-        return 'DONE'
-
-    def restart_quagga(self, kwargs):
-        return self.__restart_quagga(**kwargs)
-
-    def stop_quagga(self):
-        quaggaStop = QuaggaStopWrapper()
-        time.sleep(5)
-        try:
-            quagga_config_gen = '{}/testrib_gen.conf'.format(Quagga.host_quagga_config)
-            os.unlink(quagga_config_gen)
-        except: pass
-        return 'DONE'
-
-    def __run_shell_quagga(self, cmd = None):
-        ret = 0
-        if cmd is not None:
-            exec_cmd = 'docker exec {} {}'.format(Quagga.NAME, cmd)
-            ret = os.system(exec_cmd)
-        return ret
-
-    def __run_shell(self, cmd = None):
-        ret = 0
-        if cmd is not None:
-            ret = os.system(cmd)
-        return ret
-
-    def run_shell_quagga(self, kwargs):
-        return self.__run_shell_quagga(**kwargs)
-
-    def run_shell(self, kwargs):
-        return self.__run_shell(**kwargs)
-
-    def __restart_radius(self, olt_conf_file = ''):
-        olt_conf = os.path.join(Onos.setup_dir, os.path.basename(olt_conf_file))
-        olt = OltConfig(olt_conf_file = olt_conf)
-        port_map, _ = olt.olt_port_map()
-        Radius(prefix = Container.IMAGE_PREFIX, restart = True)
-        radius_macs = get_radius_macs(len(port_map['radius_ports']))
-        radius_networks = get_radius_networks(len(port_map['switch_radius_port_list']))
-        radius_intf_index = 0
-        index = 0
-        for host_intf, ports in port_map['switch_radius_port_list']:
-            prefix, subnet, _ = radius_networks[index]
-            mask = subnet.split('/')[-1]
-            index += 1
-            for port in ports:
-                guest_if = 'eth{}'.format(radius_intf_index + 2)
-                port_index = port_map[port]
-                local_if = 'r{}'.format(port_index)
-                guest_ip = '{}.{}/{}'.format(prefix, port_index, mask)
-                mac = radius_macs[radius_intf_index]
-                radius_intf_index += 1
-                pipework_cmd = 'pipework {0} -i {1} -l {2} {3} {4} {5}'.format(host_intf, guest_if,
-                                                                               local_if, Radius.NAME,
-                                                                               guest_ip, mac)
-                print('Configuring Radius port %s on OVS bridge %s' %(guest_if, host_intf))
-                print('Running pipework command: %s' %(pipework_cmd))
-                res = os.system(pipework_cmd)
-
-    def restart_radius(self, kwargs):
-        print('Restarting RADIUS Server')
-        self.__restart_radius(**kwargs)
-        return 'DONE'
-
-    def shutdown(self):
-        print('Shutting down cord test server')
-        os.kill(0, signal.SIGKILL)
-        return 'DONE'
-
-def find_files_by_path(*paths):
-    wanted = []
-    for p in paths:
-        try:
-            fd = os.open(p, os.O_RDONLY)
-            wanted.append(os.fstat(fd)[1:3])
-        finally:
-            os.close(fd)
-
-    def fd_wanted(fd):
-        try:
-            return os.fstat(fd)[1:3] in wanted
-        except OSError:
-            return False
-
-    max_fd = getrlimit(RLIMIT_NOFILE)[1]
-    return [ fd for fd in xrange(max_fd) if fd_wanted(fd) ]
-
-@nottest
-def cord_test_server_start(daemonize = True,
-                           cord_test_host = CORD_TEST_HOST,
-                           cord_test_port = CORD_TEST_PORT,
-                           onos_cord = None,
-                           foreground=False):
-    server = SimpleXMLRPCServer( (cord_test_host, cord_test_port) )
-    server.register_instance(CordTestServer())
-    CordTestServer.onos_cord = onos_cord
-    if daemonize is True:
-        ##before daemonizing, preserve urandom needed by paramiko
-        preserve_list = find_files_by_path('/dev/urandom')
-        preserve_list.append(server)
-        d = daemon.DaemonContext(files_preserve = preserve_list,
-                                 detach_process = True)
-        with d:
-            reinitContainerClients()
-            server.serve_forever()
-    else:
-        if foreground:
-            try:
-                server.serve_forever()
-            except KeyboardInterrupt:
-                return server
-        else:
-            task = threading.Thread(target = server.serve_forever)
-            ##terminate when main thread exits
-            task.daemon = True
-            task.start()
-    return server
-
-@nottest
-def cord_test_server_stop(server):
-    server.shutdown()
-    server.server_close()
-
-@nottest
-def get_cord_test_loc():
-    host = os.getenv('CORD_TEST_HOST', CORD_TEST_HOST)
-    port = int(os.getenv('CORD_TEST_PORT', CORD_TEST_PORT))
-    return host, port
-
-def rpc_server_instance(host = None, port = None):
-    '''Stateless'''
-    if host is None or port is None:
-        host, port = get_cord_test_loc()
-    rpc_server = 'http://{}:{}'.format(host, port)
-    return xmlrpclib.Server(rpc_server, allow_none = True)
-
-@nottest
-def __cord_test_onos_restart(**kwargs):
-    return rpc_server_instance().restart_onos(kwargs)
-
-@nottest
-def cord_test_onos_restart(node = None, config = None, timeout = 10):
-    '''Send ONOS restart to server'''
-    for i in range(3):
-        try:
-            data = __cord_test_onos_restart(node = node, config = config, timeout = timeout)
-            if data == 'DONE':
-                return True
-        except:
-            time.sleep(2)
-
-    return False
-
-@nottest
-def __cord_test_onos_shutdown(**kwargs):
-    return rpc_server_instance().shutdown_onos(kwargs)
-
-@nottest
-def cord_test_onos_shutdown(node = None):
-    data = __cord_test_onos_shutdown(node = node)
-    if data == 'DONE':
-        return True
-    return False
-
-@nottest
-def __cord_test_restart_cluster(**kwargs):
-    return rpc_server_instance().restart_cluster(kwargs)
-
-@nottest
-def cord_test_restart_cluster(config = None, timeout = 10, setup = False):
-    for i in range(3):
-        try:
-            data = __cord_test_restart_cluster(config = config, timeout = timeout, setup = setup)
-            if data == 'DONE':
-                return True
-        except:
-            time.sleep(2)
-
-    return False
-
-@nottest
-def __cord_test_onos_add_cluster(**kwargs):
-    return rpc_server_instance().add_cluster_onos(kwargs)
-
-@nottest
-def cord_test_onos_add_cluster(count = 1, config = None):
-    data = __cord_test_onos_add_cluster(count = count, config = config)
-    if data == 'DONE':
-        return True
-    return False
-
-@nottest
-def __cord_test_quagga_restart(**kwargs):
-    return rpc_server_instance().restart_quagga(kwargs)
-
-@nottest
-def __cord_test_radius_restart(**kwargs):
-    return rpc_server_instance().restart_radius(kwargs)
-
-@nottest
-def cord_test_quagga_restart(config = None, boot_delay = 30):
-    '''Send QUAGGA restart to server'''
-    data = __cord_test_quagga_restart(config = config, boot_delay = boot_delay)
-    if data == 'DONE':
-        return True
-    return False
-
-@nottest
-def __cord_test_quagga_shell(**kwargs):
-    return rpc_server_instance().run_shell_quagga(kwargs)
-
-@nottest
-def cord_test_quagga_shell(cmd = None):
-    '''Send QUAGGA shell cmd to server'''
-    return __cord_test_quagga_shell(cmd = cmd)
-
-@nottest
-def __cord_test_shell(**kwargs):
-    return rpc_server_instance().run_shell(kwargs)
-
-@nottest
-def cord_test_shell(cmd = None):
-    '''Send shell cmd to run remotely'''
-    return __cord_test_shell(cmd = cmd)
-
-@nottest
-def cord_test_quagga_stop():
-    data = rpc_server_instance().stop_quagga()
-    if data == 'DONE':
-        return True
-    return False
-
-@nottest
-def cord_test_radius_restart(olt_conf_file = ''):
-    '''Send Radius server restart to server'''
-    if not olt_conf_file:
-        olt_conf_file = os.getenv('OLT_CONFIG')
-    olt_conf_file = os.path.basename(olt_conf_file)
-    data = __cord_test_radius_restart(olt_conf_file = olt_conf_file)
-    if data == 'DONE':
-        return True
-    return False
-
-@nottest
-def cord_test_server_shutdown(host, port):
-    '''Shutdown the cord test server'''
-    rpc_server = 'http://{}:{}'.format(host, port)
-    try:
-        xmlrpclib.Server(rpc_server, allow_none = True).shutdown()
-    except: pass
-
-    return True
-
-@nottest
-def __cord_test_ovs_flow_add(**kwargs):
-    controller = get_controller()
-    OnosCtrl.config_extraneous_flows(controller = controller)
-    try:
-        return rpc_server_instance(host = controller, port = CORD_TEST_PORT).ovs_flow_add(kwargs)
-    except:
-        pass
-
-    try:
-        return rpc_server_instance().ovs_flow_add(kwargs)
-    except:
-        pass
-
-    return 'FAIL'
-
-@nottest
-def cord_test_ovs_flow_add(in_port, dl_vlan = 0):
-    data = __cord_test_ovs_flow_add(in_port = in_port, dl_vlan = dl_vlan)
-    if data == 'DONE':
-        return True
-    return False
diff --git a/src/test/utils/CordTestUtils.py b/src/test/utils/CordTestUtils.py
deleted file mode 100644
index 97039f4..0000000
--- a/src/test/utils/CordTestUtils.py
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import subprocess
-import socket
-import fcntl
-import struct
-import os
-import logging
-
-log_test = logging.getLogger('cordTester')
-test_consolehandler = logging.StreamHandler()
-#test_consolehandler.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
-log_test.addHandler(test_consolehandler)
-
-# we use subprocess as commands.getstatusoutput would be deprecated
-def getstatusoutput(cmd):
-    command = [ '/bin/bash', '-c', cmd ]
-    p = subprocess.Popen(command, stdout = subprocess.PIPE)
-    out, _ = p.communicate()
-    return p.returncode, out.strip()
-
-def get_ip(iface):
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    try:
-        info = fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', bytes(iface[:15])))
-    except:
-        info = None
-    s.close()
-    if info:
-        return '.'.join( [ str(ord(c)) for c in info[20:24] ] )
-    return None
-
-def get_mac(iface = None, pad = 4):
-    if iface is None:
-        iface = os.getenv('TEST_SWITCH', 'ovsbr0')
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    try:
-        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(iface[:15])))
-    except:
-        info = ['0'] * 24
-    s.close()
-    sep = ''
-    if pad == 0:
-        sep = ':'
-    return '0'*pad + sep.join(['%02x' %ord(char) for char in info[18:24]])
-
-def get_default_gw():
-    cmd = "ip route show | grep default | head -1 | awk '{print $3}'"
-    cmd_dev = "ip route show | grep default | head -1 | awk '{print $NF}'"
-    st, gw = getstatusoutput(cmd)
-    st2, gw_device = getstatusoutput(cmd_dev)
-    if st != 0:
-        gw = None
-    if st2 != 0:
-        gw_device = None
-    return gw, gw_device
-
-def get_controllers():
-    controllers = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
-    return controllers.split(',')
-
-def get_controller():
-    controllers = get_controllers()
-    return controllers[0]
-
-def running_on_pod():
-    """If we are running on Ciab or inside a physical podd, key file would be set"""
-    return True if os.environ.get('SSH_KEY_FILE', None) else False
diff --git a/src/test/utils/DHCP.py b/src/test/utils/DHCP.py
deleted file mode 100644
index 35177b3..0000000
--- a/src/test/utils/DHCP.py
+++ /dev/null
@@ -1,369 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from scapy.all import *
-from CordTestUtils import log_test
-
-conf.verb = 0 # Disable Scapy verbosity
-conf.checkIPaddr = 0 # Don't check response packets for matching destination IPs
-
-class DHCPTest:
-
-    def __init__(self, seed_ip = '192.168.1.1', iface = 'veth0',lease_time=600):
-        self.seed_ip = seed_ip
-        self.seed_mac = self.ipToMac(self.seed_ip)
-        self.iface = iface
-	self.lease_time = lease_time
-        self.mac_map = {}
-        self.mac_inverse_map = {}
-	self.bootpmac = None
-	self.dhcpresp = None
-	self.servermac = None
-	self.return_option = None
-	self.after_T2 = False
-	self.send_different_option = None
-        self.specific_lease = None
-
-    def is_mcast(self, ip):
-        mcast_octet = (atol(ip) >> 24) & 0xff
-        return True if mcast_octet >= 224 and mcast_octet <= 239 else False
-
-    def discover(self, mac = None, update_seed = False):
-        '''Send a DHCP discover/offer'''
-
-        if mac is None:
-            mac = self.seed_mac
-            if update_seed:
-                self.seed_ip = self.incIP(self.seed_ip)
-                self.seed_mac = self.ipToMac(self.seed_ip)
-                mac = self.seed_mac
-
-        chmac = self.macToChaddr(mac)
-	#log_test.info('mac and chmac are %s %s'%(mac, chmac))
-	self.bootpmac = chmac
-        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
-        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
-        L4 = UDP(sport=68, dport=67)
-        L5 = BOOTP(chaddr=chmac)
-        L6 = DHCP(options=[("message-type","discover"),"end"])
-        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
-	#log_test.info('dhcp discover packet is %s'%(L2/L3/L4/L5/L6).show())
-        self.dhcpresp = resp
-	#log_test.info('discover response is %s'%resp.show())
-        try:
-            srcIP = resp.yiaddr
-            serverIP = resp.siaddr
-        except AttributeError:
-            log_test.info("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
-            return (None, None)
-
-        subnet_mask = "0.0.0.0"
-        for x in resp.lastlayer().options:
-            if(x == 'end'):
-                break
-            op,val = x
-            if(op == "subnet_mask"):
-                subnet_mask = val
-            elif(op == 'server_id'):
-                server_id = val
-
-        L5 = BOOTP(chaddr=chmac, yiaddr=srcIP)
-        L6 = DHCP(options=[("message-type","request"), ("server_id",server_id),
-                           ("subnet_mask",subnet_mask), ("requested_addr",srcIP), "end"])
-        resp2 = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
-	#log_test.info('request response is %s'%resp2.show())
-        self.mac_map[mac] = (srcIP, serverIP)
-        self.mac_inverse_map[srcIP] = (mac, serverIP)
-        return (srcIP, serverIP)
-
-    def only_discover(self, mac = None, desired = False, lease_time = False, lease_value=600, multiple = False):
-        '''Send a DHCP discover'''
-
-        if mac is None:
-	    if multiple:
-               mac = RandMAC()._fix()
-	    else:
-               mac = self.seed_mac
-
-
-        chmac = self.macToChaddr(mac)
-	self.bootpmac = chmac
-        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
-        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
-        L4 = UDP(sport=68, dport=67)
-        L5 = BOOTP(chaddr=chmac)
-	if desired:
-		L6 = DHCP(options=[("message-type","discover"),("requested_addr",self.seed_ip),"end"])
-
-	elif lease_time:
-		L6 = DHCP(options=[("message-type","discover"),("lease_time",lease_value),"end"])
-
-	else:
-	        L6 = DHCP(options=[("message-type","discover"),"end"])
-	#log_test.info('only discover packet is %s'%(L2/L3/L4/L5/L6).show())
-
-        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
-	#log_test.info('discovery packet is %s'%(L2/L3/L4/L5/L6).show())
-	if resp == None:
-                return (None, None, mac, None)
-	#log_test.info('only discover response is %s'%resp.show())
-
-	self.dhcpresp = resp
-        for x in resp.lastlayer().options:
-            if(x == 'end'):
-                break
-            op,val = x
-            if(op == "message-type"):
-
-	    	if(val == 2):
-
-			try:
-            			srcIP = resp.yiaddr
-            			serverIP = resp.siaddr
-        		except AttributeError:
-           			log_test.info("In Attribute error.")
-            		 	log_test.info("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
-                                return (None, None, None, None)
-
-			if self.return_option:
-				for x in resp.lastlayer().options:
-        	    			if(x == 'end'):
-                				break
-	            			op,val = x
-
-	        	    		if op == "lease_time":
-						if self.return_option == 'lease':
-							return (srcIP, serverIP, mac, val)
-
-	        	    		elif op == "subnet_mask":
-						if self.return_option == 'subnet':
-							return (srcIP, serverIP, mac, val)
-					elif op == "router":
-						if self.return_option == 'router':
-							return (srcIP, serverIP, mac, val)
-					elif op == "broadcast_address":
-						if self.return_option == 'broadcast_address':
-							return (srcIP, serverIP, mac, val)
-					elif op == "name_server":
-						if self.return_option == 'dns':
-							return (srcIP, serverIP, mac, val)
-
-
-			else:
-				return (srcIP, serverIP, mac, None)
-		elif(val == 6):
-			return (None, None, mac, None)
-
-
-    def only_request(self, cip, mac, cl_reboot = False, lease_time = False, lease_value=600, renew_time = False, rebind_time = False, unicast = False):
-        '''Send a DHCP offer'''
-
-	subnet_mask = "0.0.0.0"
-        for x in self.dhcpresp.lastlayer().options:
-            	if(x == 'end'):
-                	break
-            	op,val = x
-            	if(op == "subnet_mask"):
-                	subnet_mask = val
-            	elif(op == 'server_id'):
-                	server_id = val
-
-	if unicast and self.servermac:
-        	L2 = Ether(dst=self.servermac, src=mac)
-	        L3 = IP(src=cip, dst=server_id)
-	else:
-	        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
-		if self.after_T2:
-	        	L3 = IP(src=cip, dst="255.255.255.255")
-		else:
-		        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
-        L4 = UDP(sport=68, dport=67)
-
-	if self.after_T2 == True:
-        	L5 = BOOTP(chaddr=self.bootpmac, ciaddr = cip)
-	else:
-
-	        L5 = BOOTP(chaddr=self.bootpmac, yiaddr=cip)
-
-	if cl_reboot or self.after_T2:
-                L6 = DHCP(options=[("message-type","request"),("subnet_mask",subnet_mask), ("requested_addr",cip), "end"])
-	elif self.send_different_option:
-		if self.send_different_option == 'subnet':
-	       		L6 = DHCP(options=[("message-type","request"),("server_id",server_id),
-        	                   	("subnet_mask",'255.255.252.252'), ("requested_addr",cip), "end"])
-		elif self.send_different_option == 'router':
-	       		L6 = DHCP(options=[("message-type","request"),("server_id",server_id),
-        	                   	("subnet_mask",subnet_mask), ("router",'1.1.1.1'), ("requested_addr",cip), "end"])
-		elif self.send_different_option == 'broadcast_address':
-	       		L6 = DHCP(options=[("message-type","request"),("server_id",server_id),
-        	                   	("subnet_mask",subnet_mask), ("broadcast_address",'1.1.1.1'), ("requested_addr",cip), "end"])
-
-		elif self.send_different_option == 'dns':
-	       		L6 = DHCP(options=[("message-type","request"),("server_id",server_id),
-        	                   	("subnet_mask",subnet_mask), ("name_server",'1.1.1.1'), ("requested_addr",cip), "end"])
-
-	elif lease_time:
-             L6 = DHCP(options=[("message-type","request"), ("server_id",server_id),
-                                ("subnet_mask",subnet_mask), ("requested_addr",cip),("lease_time",lease_value), "end"])
-	else:
-             L6 = DHCP(options=[("message-type","request"), ("server_id",server_id),
-                           	("subnet_mask",subnet_mask), ("requested_addr",cip), "end"])
-
-	resp=srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
-	#log_test.info('request packet is %s'%(L2/L3/L4/L5/L6).show())
-	#log_test.info('response packet is %s'%resp.show())
-	if resp == None:
-        	return (None, None)
-
-
-	self.servermac = resp.getlayer(Ether).src
-
-	for x in resp.lastlayer().options:
-            	if(x == 'end'):
-                	break
-            	op,val = x
-            	if(op == "message-type"):
-
-			if(val == 5):
-				try:
-            				srcIP = resp.yiaddr
-            				serverIP = resp.siaddr
-					self.mac_map[mac] = (srcIP, serverIP)
-                                        self.mac_inverse_map[srcIP] = (mac, serverIP)
-        			except AttributeError:
-           				log_test.info("In Attribute error.")
-            				log_test.info("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
-            				return (None, None)
-
-				if lease_time or renew_time or rebind_time or self.specific_lease:
-					for x in resp.lastlayer().options:
-            					if(x == 'end'):
-                					break
-	            				op,val = x
-
-        	    				if op == "lease_time":
-
-							if self.specific_lease:
-								return (srcIP, serverIP, val)
-							if lease_time == True:
-								self.mac_map[mac] = (srcIP, serverIP)
-			        				self.mac_inverse_map[srcIP] = (mac, serverIP)
-								return (srcIP, serverIP, val)
-	            				elif op == "renewal_time":
-							if renew_time == True:
-								self.mac_map[mac] = (srcIP, serverIP)
-				        			self.mac_inverse_map[srcIP] = (mac, serverIP)
-								return (srcIP, serverIP, val)
-            					elif op == "rebinding_time":
-							if rebind_time == True:
-								self.mac_map[mac] = (srcIP, serverIP)
-			        				self.mac_inverse_map[srcIP] = (mac, serverIP)
-								return (srcIP, serverIP, val)
-				else:
-					self.mac_map[mac] = (srcIP, serverIP)
-					self.mac_inverse_map[srcIP] = (mac, serverIP)
-					return (srcIP, serverIP)
-			elif(val == 6):
-
-				log_test.info("Got DHCP NAK.")
-				return (None, None)
-
-
-
-    def discover_next(self):
-        '''Send next dhcp discover/request with updated mac'''
-        return self.discover(update_seed = True)
-
-    def release(self, ip):
-        '''Send a DHCP discover/offer'''
-        if ip is None:
-            return False
-        if not self.mac_inverse_map.has_key(ip):
-            return False
-        mac, server_ip = self.mac_inverse_map[ip]
-        chmac = self.macToChaddr(mac)
-        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
-        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
-        L4 = UDP(sport=68, dport=67)
-        L5 = BOOTP(chaddr=chmac, ciaddr = ip)
-        L6 = DHCP(options=[("message-type","release"), ("server_id", server_ip), "end"])
-        sendp(L2/L3/L4/L5/L6, iface = self.iface, count=2)
-	#log_test.info('release response is %s'%resp)
-        del self.mac_map[mac]
-        del self.mac_inverse_map[ip]
-        return True
-
-    def macToChaddr(self, mac):
-        rv = []
-        mac = mac.split(":")
-        for x in mac:
-            rv.append(chr(int(x, 16)))
-        return reduce(lambda x,y: x + y, rv)
-
-    def get_ip(self, mac):
-        if self.mac_map.has_key(mac):
-            return self.mac_map[mac]
-        return (None, None)
-
-    def get_mac(self, ip):
-        if self.mac_inverse_map.has_key(ip):
-            return self.mac_inverse_map[ip]
-        return (None, None)
-
-    def ipToMac(self, ip):
-        '''Generate a mac from a ip'''
-
-        mcast = self.is_mcast(ip)
-        mac = "01:00:5e" if mcast == True else "00:00:00"
-        octets = ip.split(".")
-        for x in range(1,4):
-            num = str(hex(int(octets[x])))
-            num =  num.split("x")[1]
-            if len(num) < 2:
-                num = "0" + str(num)
-            mac += ":" + num
-        return mac
-
-    def incIP(self, ip, n=1):
-        '''Increment an IP'''
-
-        if n < 1:
-            return ip
-        o = ip.split(".")
-        for ii in range(3,-1,-1):
-            if int(o[ii]) < 255:
-                o[ii] = str(int(o[ii]) + 1)
-                break
-            else:
-                o[ii] = str(0)
-
-        n -= 1
-        return self.incIP(".".join(o), n)
diff --git a/src/test/utils/EapMD5.py b/src/test/utils/EapMD5.py
deleted file mode 100644
index b3f5834..0000000
--- a/src/test/utils/EapMD5.py
+++ /dev/null
@@ -1,125 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import sys, os
-from EapolAAA import *
-from Enum import *
-import noseMd5AuthHolder as md5AuthHolder
-from socket import *
-from struct import *
-from md5 import md5
-from scapy.all import *
-from nose.tools import *
-from CordTestBase import CordTester
-
-class MD5AuthTest(EapolPacket, CordTester):
-
-    md5StateTable = Enumeration("MD5StateTable", ("ST_EAP_SETUP",
-                                                  "ST_EAP_START",
-                                                  "ST_EAP_ID_REQ",
-                                                  "ST_EAP_MD5_CHALLENGE",
-                                                  "ST_EAP_STATUS",
-                                                  "ST_EAP_MD5_DONE"
-                                                  )
-                                )
-    md5EventTable = Enumeration("MD5EventTable", ("EVT_EAP_SETUP",
-                                                  "EVT_EAP_START",
-                                                  "EVT_EAP_ID_REQ",
-                                                  "EVT_EAP_MD5_CHALLENGE",
-                                                  "EVT_EAP_STATUS",
-                                                  "EVT_EAP_MD5_DONE"
-                                                  )
-                                )
-    def __init__(self, intf = 'veth0', password = "password", required_status = "EAP_SUCCESS"):
-        self.passwd = password
-        self.req_status = required_status
-        self.fsmTable = md5AuthHolder.initMd5AuthHolderFsmTable(self, self.md5StateTable, self.md5EventTable)
-        EapolPacket.__init__(self, intf)
-        CordTester.__init__(self, self.fsmTable, self.md5StateTable.ST_EAP_MD5_DONE)
-        self.currentState = self.md5StateTable.ST_EAP_SETUP
-        self.currentEvent = self.md5EventTable.EVT_EAP_SETUP
-        self.nextState = None
-        self.nextEvent = None
-
-    def _eapSetup(self):
-        print('Inside EAP Setup')
-        self.setup()
-        self.nextEvent = self.md5EventTable.EVT_EAP_START
-
-    def _eapStart(self):
-        print('Inside EAP Start')
-        self.eapol_start()
-        self.nextEvent = self.md5EventTable.EVT_EAP_ID_REQ
-
-    def _eapIdReq(self):
-        print('Inside EAP ID Req')
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print("Code %d, id %d, len %d" %(code, pkt_id, eaplen))
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        reqdata = p[5:4+eaplen]
-        assert_equal(reqtype, EAP_TYPE_ID)
-        print("<====== Send EAP Response with identity = %s ================>" % USER)
-        self.eapol_id_req(pkt_id, USER)
-        self.nextEvent = self.md5EventTable.EVT_EAP_MD5_CHALLENGE
-
-    def _eapMd5Challenge(self):
-        print('Inside EAP MD5 Challenge Exchange')
-        challenge,pkt_id =self.eap_md5_challenge_recv(self.passwd)
-        resp=md5(challenge).digest()
-        resp=chr(len(resp))+resp
-        length= 5+len(resp)
-        print("Generated MD5 challenge is %s Length : %d" % (resp,length))
-        print("--> Send EAP response with MD5 challenge")
-        eap_payload = self.eap(EAP_RESPONSE, pkt_id, EAP_TYPE_MD5, str(resp))
-        self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-        self.nextEvent = self.md5EventTable.EVT_EAP_STATUS
-
-    def _eapStatus(self):
-       print('Inside EAP Status -- Sucess/Failure')
-       if self.req_status == "EAP_SUCCESS":
-         status=self.eap_Status()
-         print("<============EAP code received is = %d ====================>" % status)
-         assert_equal(status, EAP_SUCCESS)
-         print("Received EAP SUCCESS")
-       else:
-         print('Inside EAP Status -- Sucess/Failure ===> SUCCESS should not be received , Since Negative Testcase')
-         self.s.settimeout(10)
-         assert_equal(self.s.gettimeout(), 10)
-         print("Check if the socket timed out ====> Since negative testcase socket should timeout because ONOS is not sending the EAP FAILURE Message")
-         assert_raises(socket.error, self.s.recv, 1024)
-       self.nextEvent = self.md5EventTable.EVT_EAP_MD5_DONE
-
-    def _wrong_password(self):
-       print('Start Testcase for EAP-MD5 Wrong Password')
-       #self._eap_md5_states()
-       self.__init__(intf = 'veth0', password = "wrong_password", required_status = "EAP_FAILURE")
diff --git a/src/test/utils/EapPAP.py b/src/test/utils/EapPAP.py
deleted file mode 100644
index 3a143a7..0000000
--- a/src/test/utils/EapPAP.py
+++ /dev/null
@@ -1,117 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import sys, os
-from EapolAAA import *
-from Enum import *
-import nosePAPAuthHolder as PAPAuthHolder
-from socket import *
-from struct import *
-from scapy.all import *
-from nose.tools import *
-from CordTestBase import CordTester
-from CordTestUtils import log_test
-PAP_USER = "raduser"
-PAP_PASSWD = "radpass"
-log_test.setLevel('INFO')
-
-class PAPAuthTest(EapolPacket, CordTester):
-
-    PAPStateTable = Enumeration("PAPStateTable", ("ST_EAP_SETUP",
-                                                  "ST_EAP_START",
-                                                  "ST_EAP_ID_REQ",
-                                                  "ST_EAP_PAP_USER_REQ",
-                                                  "ST_EAP_PAP_PASSWD_REQ",
-                                                  "ST_EAP_PAP_DONE"
-                                                  )
-                                )
-    PAPEventTable = Enumeration("PAPEventTable", ("EVT_EAP_SETUP",
-                                                  "EVT_EAP_START",
-                                                  "EVT_EAP_ID_REQ",
-                                                  "EVT_EAP_PAP_USER_REQ",
-                                                  "EVT_EAP_PAP_PASSWD_REQ",
-                                                  "EVT_EAP_PAP_DONE"
-                                                  )
-                                )
-    def __init__(self, intf = 'veth0'):
-        self.fsmTable = PAPAuthHolder.initPAPAuthHolderFsmTable(self, self.PAPStateTable, self.PAPEventTable)
-        EapolPacket.__init__(self, intf)
-        CordTester.__init__(self, self.fsmTable, self.PAPStateTable.ST_EAP_PAP_DONE)
-                            #self.PAPStateTable, self.PAPEventTable)
-        self.currentState = self.PAPStateTable.ST_EAP_SETUP
-        self.currentEvent = self.PAPEventTable.EVT_EAP_SETUP
-        self.nextState = None
-        self.nextEvent = None
-
-    def _eapSetup(self):
-        print 'Inside EAP PAP Setup'
-        self.setup()
-        self.nextEvent = self.PAPEventTable.EVT_EAP_START
-
-    def _eapStart(self):
-        print 'Inside EAP PAP Start'
-        self.eapol_start()
-        self.nextEvent = self.PAPEventTable.EVT_EAP_ID_REQ
-
-    def _eapIdReq(self):
-        log_test.info( 'Inside EAP ID Req' )
-        def eapol_cb(pkt):
-                log_test.info('Got EAPOL packet with type id and code request')
-                log_test.info('Packet code: %d, type: %d, id: %s', pkt[EAP].code, pkt[EAP].type, pkt[EAP].id)
-                log_test.info("<====== Send EAP Response with identity = %s ================>" % PAP_USER)
-                self.eapol_id_req(pkt[EAP].id, PAP_USER)
-
-        self.eapol_scapy_recv(cb = eapol_cb,
-                              lfilter = lambda pkt: pkt[EAP].type == EAP.TYPE_ID and pkt[EAP].code == EAP.REQUEST)
-        self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_USER_REQ
-
-    def _eapPAPUserReq(self):
-        log_test.info('UserReq Inside Challenge')
-        def eapol_cb(pkt):
-                log_test.info('Got EAPOL packet with type id and code request')
-                log_test.info('Packet code: %d, id: %s', pkt[EAP].code, pkt[EAP].id)
-                log_test.info('Send EAP Response for id %s with Password = %s' %(pkt[EAP].id, PAP_PASSWD) )
-                self.eapol_id_req(pkt[EAP].id, PAP_PASSWD)
-
-        self.eapol_scapy_recv(cb = eapol_cb,
-                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
-        #self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_PASSWD_REQ
-        self.nextEvent = None
-
-    def _eapPAPPassReq(self):
-        log_test.info('PassReq Inside Challenge')
-        def eapol_cb(pkt):
-                log_test.info('Got EAPOL packet with type id and code request')
-                log_test.info('Packet code: %d, type: %d', pkt[EAP].code, pkt[EAP].type)
-
-        self.eapol_scapy_recv(cb = eapol_cb,
-                              lfilter = lambda pkt: pkt[EAP].code == EAP.SUCCESS)
-        self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_DONE
diff --git a/src/test/utils/EapTLS.py b/src/test/utils/EapTLS.py
deleted file mode 100644
index 2655722..0000000
--- a/src/test/utils/EapTLS.py
+++ /dev/null
@@ -1,577 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import sys, os
-from EapolAAA import *
-from Enum import *
-import noseTlsAuthHolder as tlsAuthHolder
-from scapy_ssl_tls.ssl_tls import *
-from scapy_ssl_tls.ssl_tls_crypto import *
-from tls_cert import Key
-from socket import *
-from CordTestServer import cord_test_radius_restart
-import struct
-import scapy
-from nose.tools import *
-from CordTestBase import CordTester
-from CordContainer import *
-from CordTestUtils import log_test
-import re
-import time
-
-log_test.setLevel('INFO')
-
-def bytes_to_num(data):
-    try:
-        return int(data.encode('hex'), 16)
-    except:
-        print('Exception')
-        return -1
-
-class TLSAuthTest(EapolPacket, CordTester):
-
-    tlsStateTable = Enumeration("TLSStateTable", ("ST_EAP_SETUP",
-                                                  "ST_EAP_START",
-                                                  "ST_EAP_ID_REQ",
-                                                  "ST_EAP_TLS_HELLO_REQ",
-                                                  "ST_EAP_TLS_CERT_REQ",
-                                                  "ST_EAP_TLS_CHANGE_CIPHER_SPEC",
-                                                  "ST_EAP_TLS_FINISHED",
-                                                  "ST_EAP_TLS_DONE"
-                                                  )
-                                )
-    tlsEventTable = Enumeration("TLSEventTable", ("EVT_EAP_SETUP",
-                                                  "EVT_EAP_START",
-                                                  "EVT_EAP_ID_REQ",
-                                                  "EVT_EAP_TLS_HELLO_REQ",
-                                                  "EVT_EAP_TLS_CERT_REQ",
-                                                  "EVT_EAP_TLS_CHANGE_CIPHER_SPEC",
-                                                  "EVT_EAP_TLS_FINISHED",
-                                                  "EVT_EAP_TLS_DONE"
-                                                  )
-                                )
-    server_hello_done_signature = '\x0e\x00\x00\x00'
-    SERVER_HELLO = '\x02'
-    SERVER_CERTIFICATE = '\x0b'
-    CERTIFICATE_REQUEST = '\x0d'
-    SERVER_HELLO_DONE = '\x0e'
-    SERVER_UNKNOWN = '\xff'
-    HANDSHAKE = '\x16'
-    CHANGE_CIPHER = '\x14'
-    TLS_OFFSET = 28
-    HDR_IDX = 0
-    DATA_IDX = 1
-    CB_IDX = 2
-
-    #this is from client.crt file
-    CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTb21ld2hlcmUxEzARBgNVBAoMCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAwwd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTcwMzExMDA0NzQ0WhcN
-MjIxMDMxMDA0NzQ0WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNV
-BAoMCkNpZW5hIEluYy4xFzAVBgNVBAMMDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOTxi5+TUuaosUh7f/4U9unLV3qHCC7Sf3e0o8F+FP4BVQiIslUTEupw
-gd3VkiVAhxX74Oc3w7XF98msKx7dTetpQaZPZgEgGiDmzGzlAGkwqD4FO5JotJMg
-I0rpcYw+M/z+WDVgJx3I+VGyC6lFKhIJWTlTySheyMYpcgSvOuN4z8keCNg2uaRZ
-IOvIf6aUCTF7fp2HC/468/3T6R/VuYKFNdpsyU1ogbQqCUL4WHM2uWz6G7rzXI0/
-skkKemoGouFw+0I3ydPpGpiWK4NpPKHax4dRGaO1NmTDCtrJOAKPQx8CYYH0HMNp
-BbYvidTnHBxYUBrWNpzOJ/xgopawA8kCAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAosa5wV/RA2XO/SVGWx5K
-JkQNBn6zKuLRODdkm1UwpkdSZpYoXw2ds6YPPbxV9gFeitCcPZQUho38Fg1LYrNW
-9UsDPC2HAYSxrvCinTLC2NuqLsPj7NSu+41l37hGVG4WTjPXPqet33e0mqz1eYVn
-3SsGvZl98sgG6ADD1RqUb0gpEocmoN+yx5W0hIAwZQPBt4nfAWdGH9AcJTL/Gjr7
-5m4p3cQYm2AnROp+Bim4AELv02WIl77vYWivlzok1JfZ38GL94+CMLOSKNv9OLjc
-M/uh5Q6PB+4xv8kAjmO/Fq9T6f0KcQNNPY63omZCjrFiMEMvoOD4GIg+WV2SKLsQ
-0g==
------END CERTIFICATE-----"""
-
-    #this is from client.key
-    CLIENT_PRIV_KEY = """-----BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEA5PGLn5NS5qixSHt//hT26ctXeocILtJ/d7SjwX4U/gFVCIiy
-VRMS6nCB3dWSJUCHFfvg5zfDtcX3yawrHt1N62lBpk9mASAaIObMbOUAaTCoPgU7
-kmi0kyAjSulxjD4z/P5YNWAnHcj5UbILqUUqEglZOVPJKF7IxilyBK8643jPyR4I
-2Da5pFkg68h/ppQJMXt+nYcL/jrz/dPpH9W5goU12mzJTWiBtCoJQvhYcza5bPob
-uvNcjT+ySQp6agai4XD7QjfJ0+kamJYrg2k8odrHh1EZo7U2ZMMK2sk4Ao9DHwJh
-gfQcw2kFti+J1OccHFhQGtY2nM4n/GCilrADyQIDAQABAoIBAF+xepvDl3Yj8p4K
-SPBp0N7eCH2FbW0svOzLC1t4GMwmwGUlxex7YX+ucQnJGCIL+6q7skDS9THIQo8A
-MLHg7I3GXBNowokb4u/3qGCnw2k0Vk4+H61NqJSKbVYFh1mIMnC/2xzMSO1RMKzu
-D6O77h7F245zr/P40lDJyAefOq0S6sgZqxmHmoRTHhp0tXV4mkzV7P7RqoJrvAiG
-tGMk5OfdoILnNfoeXNC50nw201UK7xhrrqqlAWZRAaUZJtsb1GxW+jOra6OtsCVg
-AKS/FxzUGMsoqluM5mHLBEN69DAvBBs8g7UVKdmCDZC+feJ31eAnPAoc1gxBHdQU
-pCnA8dECgYEA+Q6X80gnoyG0g66Gh62R7RgDLYPpgkZimLeoX49mwizAUkeSf/Mj
-raVajRmJ8J1n4UklHdQe0PE9Jhuxo4Uo9sP71ZqpQPEvN35/Sw0xxQHcwxD73SWa
-UEVsnWIDJ6QrkoBOhjDMM6tyDSPVDS23352E6sZ9EU45qWvncb5OTdUCgYEA61Np
-Qs/CpEWtPG8IiEPKPEWUEwoO8SS6C4R/UfXNC96GhfIpA4Uy3fQwTUtHEMPL+7lh
-SPFPQDBH90jOTYg30EfHiBMlKW4l21XS+PotTP3ktqZMgx06SnoM2a/+crpzFqkb
-i4eAPCsdTispElbtqleLuUbFO9aG3jHMsK2RtCUCgYB04G9YYL0RJgkTXryNQVvg
-ussK+gOD+kncxxtBtQcRCnU6Z5INb2mH3LgwzRJZk1SjeXLsm5XWkc8Tah2j0iKW
-IwS0if7xlf2Felx8OPXpMOWLuRWpAzN2hg3hkZRPbxBvkLzI5m99s/Ay0GTz6UeH
-reEpV/prO519r0COtTMD/QKBgCdRinbVS8oysh002BIccX/ciD8eIRz9a/BctQE2
-tonTJlre+SdTbApVsHRZrYgJjt2CPdT4LKum5X9VtNKTfe/Y7me3+y+O7dhV4Kgk
-9Mi2ay5xXrtReNnUxqzgkP0OVghlPOr1OuHSulTDNVuRFqitc/UC9BVpZKNfYrnq
-ZjvZAoGBALzgzXajgIdhghCt5PFLkhh3xyOliTXWFstHcMZdQF2wQyeF/uQ2zrC/
-2t1Sa+egV3QDUPYzW9YLQs9eaLh7MS9wCHLY2SMElAqYiNjRfkT4wWdPfeyFx4+E
-Euwtu+lPJ7sEpNu5jX63OS2AeZsQYlsT0Ai+lB4TeyoE6Pj04iC0
------END RSA PRIVATE KEY-----"""
-
-    def handle_server_hello_done(self, server_hello_done):
-        if server_hello_done[-4:] == self.server_hello_done_signature:
-	    log_test.info('server hello received over interface %s' %self.intf)
-            self.server_hello_done_received = True
-
-    def __init__(self, intf = 'veth0', client_cert = None, client_priv_key = None,
-                 fail_cb = None, src_mac='default', version = "TLS_1_0", session_id = '',
-                 session_id_length = None, gmt_unix_time=1234, invalid_content_type = 22,
-                 record_fragment_length = None, cipher_suites_length = None,
-                 compression_methods_length = None, compression_methods = TLSCompressionMethod.NULL,
-                 CipherSuite = True, cipher_suite = 'RSA_WITH_AES_256_CBC_SHA', id_mismatch_in_identifier_response_packet = False,
-                 id_mismatch_in_client_hello_packet = False , dont_send_client_certificate = False,
-                 dont_send_client_hello = False, restart_radius = False, invalid_client_hello_handshake_type = False,
-                 invalid_cert_req_handshake = False, incorrect_tlsrecord_type_cert_req = False,
-                 invalid_client_hello_handshake_length = False, clientkeyex_replace_with_serverkeyex = False):
-
-        self.fsmTable = tlsAuthHolder.initTlsAuthHolderFsmTable(self, self.tlsStateTable, self.tlsEventTable)
-        EapolPacket.__init__(self, intf)
-        CordTester.__init__(self, self.fsmTable, self.tlsStateTable.ST_EAP_TLS_DONE)
-                            #self.tlsStateTable, self.tlsEventTable)
-        self.currentState = self.tlsStateTable.ST_EAP_SETUP
-        self.currentEvent = self.tlsEventTable.EVT_EAP_SETUP
-	self.src_mac = src_mac
-	self.version = version
-        self.session_id_length = session_id_length
-        self.session_id = session_id
-        self.gmt_unix_time = gmt_unix_time
-        self.invalid_content_type = invalid_content_type
-        self.CipherSuite = CipherSuite
-        self.cipher_suites_length = cipher_suites_length
-        self.compression_methods_length = compression_methods_length
-        self.cipher_suite = cipher_suite
-        self.compression_methods_length = compression_methods_length
-        self.compression_methods = compression_methods
-        self.record_fragment_length = record_fragment_length
-	self.invalid_client_hello_handshake_type = invalid_client_hello_handshake_type
-	self.invalid_client_hello_handshake_length = invalid_client_hello_handshake_length
-	self.invalid_cert_req_handshake = invalid_cert_req_handshake
-        self.id_mismatch_in_identifier_response_packet = id_mismatch_in_identifier_response_packet
-        self.id_mismatch_in_client_hello_packet = id_mismatch_in_client_hello_packet
-        self.dont_send_client_certificate = dont_send_client_certificate
-        self.dont_send_client_hello = dont_send_client_hello
-	self.incorrect_tlsrecord_type_cert_req = incorrect_tlsrecord_type_cert_req
-	self.restart_radius = restart_radius
-	self.clientkeyex_replace_with_serverkeyex = clientkeyex_replace_with_serverkeyex
-        self.nextState = None
-        self.nextEvent = None
-        self.pending_bytes = 0 #for TLS fragment reassembly
-        self.server_hello_done_received = False
-        self.server_hello_done_eap_id = 0
-        self.send_tls_response = True
-        self.server_certs = []
-        self.pkt_last = ''
-        self.pkt_history = []
-        self.pkt_map = { self.SERVER_HELLO: ['', '', lambda pkt: pkt ],
-                         self.SERVER_CERTIFICATE: ['', '', lambda pkt: pkt ],
-                         self.CERTIFICATE_REQUEST: ['', '', lambda pkt: pkt ],
-                         self.SERVER_HELLO_DONE: ['', '', self.handle_server_hello_done ],
-                         self.SERVER_UNKNOWN: ['', '', lambda pkt: pkt ]
-                       }
-	if self.clientkeyex_replace_with_serverkeyex:
-            self.tls_ctx = TLSSessionCtx(client = False)
-	else:
-	    self.tls_ctx = TLSSessionCtx(client = True)
-        self.client_cert = self.CLIENT_CERT if client_cert is None else client_cert
-        self.client_priv_key = self.CLIENT_PRIV_KEY if client_priv_key is None else client_priv_key
-        self.failTest = False
-        self.fail_cb = fail_cb
-
-    def load_tls_record(self, data, pkt_type = ''):
-        #if pkt_type not in [ self.SERVER_HELLO_DONE, self.SERVER_UNKNOWN ]:
-        if pkt_type == self.SERVER_HELLO_DONE:
-            data = str(TLSRecord(content_type=TLSContentType.HANDSHAKE)/data)
-        elif pkt_type == self.CERTIFICATE_REQUEST:
-            data = str(TLSRecord()/TLSHandshake(type=TLSHandshakeType.CERTIFICATE_REQUEST)/data[9:])
-            data = None #For now ignore this record
-        if data:
-            TLS(data, ctx = self.tls_ctx)
-
-    def pkt_update(self, pkt_type, data, hdr=None, reassembled = False):
-        if not self.pkt_map.has_key(pkt_type):
-            return
-        if hdr is not None:
-            self.pkt_map[pkt_type][self.HDR_IDX] += hdr
-        self.pkt_map[pkt_type][self.DATA_IDX] += data
-        if reassembled is True:
-            self.pkt_map[pkt_type][self.CB_IDX](self.pkt_map[pkt_type][self.DATA_IDX])
-            log_test.info('Interface %s, Appending packet type %02x to packet history of len %d'
-                          %(self.intf, ord(pkt_type), len(self.pkt_map[pkt_type][self.DATA_IDX])))
-            self.pkt_history.append(self.pkt_map[pkt_type][self.DATA_IDX])
-            data = ''.join(self.pkt_map[pkt_type][:self.DATA_IDX+1])
-            self.load_tls_record(data, pkt_type = pkt_type)
-            self.pkt_map[pkt_type][self.HDR_IDX] = ''
-            self.pkt_map[pkt_type][self.DATA_IDX] = ''
-
-    def tlsFail(self):
-        ##Force a failure
-	log_test.info('entering into testFail function for interface %s' %self.intf)
-        self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_FINISHED
-        self.nextState = self.tlsStateTable.ST_EAP_TLS_FINISHED
-        self.failTest = True
-
-    def eapol_server_hello_cb(self, pkt):
-        '''Reassemble and send response for server hello/certificate fragments'''
-        r = str(pkt)
-        offset = self.TLS_OFFSET
-        tls_data = r[offset:]
-        type_hdrlen = 0
-        if self.pending_bytes > 0:
-            if len(tls_data) >= self.pending_bytes:
-                self.pkt_update(self.pkt_last, tls_data[:self.pending_bytes], reassembled = True)
-                offset += self.pending_bytes
-                self.pkt_last = ''
-                self.pending_bytes = 0
-            else:
-                self.pkt_update(self.pkt_last, tls_data)
-                self.pending_bytes -= len(tls_data)
-        print('Interface: %s, Offset: %d, pkt : %d, pending %d\n' %(self.intf, offset, len(pkt), self.pending_bytes))
-        while self.pending_bytes == 0 and offset < len(pkt):
-            tls_data = r[offset:]
-            hexdump(tls_data)
-            self.pending_bytes = bytes_to_num(tls_data[3:5])
-            if self.pending_bytes < 0:
-                self.pending_bytes = 0
-                return
-            if tls_data[0] == self.HANDSHAKE:
-                pkt_type = tls_data[5]
-                if pkt_type in [ self.CERTIFICATE_REQUEST ]:
-                    self.pending_bytes = bytes_to_num(tls_data[6:9])
-                    type_hdrlen = 4
-                if len(tls_data) - 5 - type_hdrlen >= self.pending_bytes:
-                    data_received = tls_data[5: 5 + type_hdrlen + self.pending_bytes ]
-                    offset += 5 + type_hdrlen + self.pending_bytes
-                    type_hdrlen = 0
-                    self.pending_bytes = 0
-                    self.pkt_update(pkt_type, data_received,
-                                    hdr = tls_data[:5],
-                                    reassembled = True)
-                else:
-                    self.pkt_update(pkt_type, tls_data[5:],
-                                    hdr = tls_data[:5],
-                                    reassembled = False)
-                    self.pending_bytes -= len(tls_data) - 5 - type_hdrlen
-                    self.pkt_last = pkt_type
-                    log_test.info('Interface: %s, Pending bytes left %d' %(self.intf, self.pending_bytes))
-                    assert self.pending_bytes > 0
-            elif tls_data[0] == self.SERVER_HELLO_DONE:
-                self.server_hello_done_eap_id = pkt[EAP].id
-                self.pkt_update(tls_data[0], tls_data, reassembled = True)
-                break
-            else:
-                self.pkt_last = self.SERVER_UNKNOWN
-                if len(tls_data) - 5 >= self.pending_bytes:
-                    offset += 5 + self.pending_bytes
-                    self.pending_bytes = 0
-                    self.pkt_last = ''
-
-        #send TLS response ack till we receive server hello done
-        if self.server_hello_done_received == False:
-            eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, TLS_LENGTH_INCLUDED, '')
-            self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-
-    def _eapSetup(self):
-	#if self.src_mac == 'bcast':self.setup(src_mac='bcast')
-	#if self.src_mac == 'mcast': self.setup(src_mac='mcast')
-	#if self.src_mac == 'zeros': self.setup(src_mac='zeros')
-	#if self.src_mac == 'default': self.setup(src_mac='default')
-	#log_test.info('Interface: %s, Source mac is %s' %(self.intf, self.src_mac))
-	self.setup(src_mac=self.src_mac)
-        self.nextEvent = self.tlsEventTable.EVT_EAP_START
-
-    def _eapStart(self):
-	log_test.info('_eapStart method started over interface %s' %(self.intf))
-        self.eapol_start()
-        self.nextEvent = self.tlsEventTable.EVT_EAP_ID_REQ
-
-    def _eapIdReq(self):
-        log_test.info('Inside EAP ID Req for interface %s' %(self.intf))
-        def eapol_cb(pkt):
-                log_test.info('Got EAPOL packet with type id and code request for interface %s' %(self.intf))
-                log_test.info('Interface: %s, Packet code: %d, type: %d, id: %d' %(self.intf, pkt[EAP].code, pkt[EAP].type, pkt[EAP].id))
-                log_test.info("Send EAP Response with identity %s over interface %s" % (USER, self.intf))
-		if self.id_mismatch_in_identifier_response_packet:
-		    log_test.info('\nSending invalid id field in EAP Identity Response packet over interface %s' %(self.intf))
-                    self.eapol_id_req(pkt[EAP].id+10, USER)
-		else:
-		    self.eapol_id_req(pkt[EAP].id, USER)
-
-        r = self.eapol_scapy_recv(cb = eapol_cb,
-                                  lfilter =
-                                  lambda pkt: EAP in pkt and pkt[EAP].type == EAP.TYPE_ID and pkt[EAP].code == EAP.REQUEST)
-        if len(r) > 0:
-            self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_HELLO_REQ
-        else:
-            self.tlsFail()
-            return r
-
-    def _eapTlsHelloReq(self):
-
-        def eapol_cb(pkt):
-                log_test.info('Got hello request for id %d over interface %s', pkt[EAP].id, self.intf)
-                self.client_hello = TLSClientHello(version= self.version,
-                                                   gmt_unix_time=self.gmt_unix_time,
-                                                   random_bytes= '\xAB' * 28,
-                                                   session_id_length = self.session_id_length,
-                                                   session_id= self.session_id,
-                                                   compression_methods_length = self.compression_methods_length,
-                                                   compression_methods= self.compression_methods,
-                                                   cipher_suites_length = self.cipher_suites_length,
-                                                   cipher_suites=[self.cipher_suite]
-                                                   )
-		if self.invalid_client_hello_handshake_type:
-		    log_test.info('sending server_hello instead of client_hello handshape type in client hello packet')
-		    client_hello_data = TLSHandshake(type='server_hello')/self.client_hello
-		elif self.invalid_client_hello_handshake_length:
-		    log_test.info('sending TLS Handshake message with zero length field in client hello packet')
-		    client_hello_data = TLSHandshake(length=0)/self.client_hello
-		else:
-		    client_hello_data = TLSHandshake()/self.client_hello
-                #client_hello_data = TLSHandshake()/self.client_hello
-                self.pkt_history.append( str(client_hello_data) )
-		if self.record_fragment_length:
-                    reqdata = TLSRecord(length=self.record_fragment_length)/client_hello_data
-		else:
-		    reqdata = TLSRecord()/client_hello_data
-                self.load_tls_record(str(reqdata))
-                log_test.info("Sending Client Hello TLS payload of len %d, id %d over interface %s" %(len(reqdata),pkt[EAP].id, self.intf))
-		if self.id_mismatch_in_client_hello_packet:
-                    log_test.info('\nsending invalid id field in client hello packet')
-                    eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id+10, TLS_LENGTH_INCLUDED, str(reqdata))
-                else:
-                    eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, TLS_LENGTH_INCLUDED, str(reqdata))
-                if self.dont_send_client_hello:
-                    log_test.info('\nskipping client hello packet sending part')
-                    pass
-                else:
-                    self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-		if self.restart_radius:
-                    cord_test_radius_restart()
-
-        r = self.eapol_scapy_recv(cb = eapol_cb,
-                                  lfilter =
-                                  lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
-
-        if len(r) == 0:
-            self.tlsFail()
-            return r
-
-        #move to client/server certificate request
-        self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_CERT_REQ
-
-    def get_verify_data(self):
-        all_handshake_pkts = ''.join(self.pkt_history)
-        return self.tls_ctx.get_verify_data(data = all_handshake_pkts)
-
-    def get_verify_signature(self, pem_data):
-        all_handshake_pkts = ''.join(self.pkt_history)
-        k = Key(pem_data)
-        signature = k.sign(all_handshake_pkts, t = 'pkcs', h = 'tls')
-        signature_data = '{}{}'.format(struct.pack('!H', len(signature)), signature)
-        return signature_data
-
-    def get_encrypted_handshake_msg(self, finish_val=''):
-        if not finish_val:
-            finish_val = self.get_verify_data()
-        msg = str(TLSHandshake(type=TLSHandshakeType.FINISHED)/finish_val)
-        crypto_container = CryptoContainer(self.tls_ctx, data = msg,
-                                           content_type = TLSContentType.HANDSHAKE)
-        return crypto_container.encrypt()
-
-    def get_encrypted_application_msg(self, msg = ''):
-        '''Needed with tunneled TLS'''
-        if not msg:
-            msg = 'test data'
-        return to_raw(TLSPlaintext(data = 'GET / HTTP/1.1\r\nHOST: localhost\r\n\r\n'), self.tls_ctx)
-
-    def _eapTlsCertReq(self):
-        log_test.info('Receiving server certificates over interface %s', self.intf)
-        while self.server_hello_done_received == False:
-            r = self.eapol_scapy_recv(cb = self.eapol_server_hello_cb,
-                                      lfilter =
-                                      lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
-                                          pkt[EAP].code == EAP.REQUEST)
-            if len(r) == 0:
-                self.tlsFail()
-                return r
-        log_test.info('Sending client certificate request over interface %s', self.intf)
-        rex_pem = re.compile(r'\-+BEGIN[^\-]+\-+(.*?)\-+END[^\-]+\-+', re.DOTALL)
-        if self.client_cert:
-            der_cert = rex_pem.findall(self.client_cert)[0].decode("base64")
-            client_certificate_list = TLSHandshake()/TLSCertificateList(
-                certificates=[TLSCertificate(data=x509.X509Cert(der_cert))])
-        else:
-            client_certificate_list = TLSHandshake()/TLSCertificateList(certificates=[])
-        client_certificate = TLSRecord(version="TLS_1_0")/client_certificate_list
-	kex_data = self.tls_ctx.get_client_kex_data()
-        client_key_ex_data = TLSHandshake()/kex_data
-        client_key_ex = TLSRecord()/client_key_ex_data
-        if self.client_cert:
-            self.load_tls_record(str(client_certificate))
-            self.pkt_history.append(str(client_certificate_list))
-        self.load_tls_record(str(client_key_ex))
-        self.pkt_history.append(str(client_key_ex_data))
-        verify_signature = self.get_verify_signature(self.client_priv_key)
-	if self.invalid_cert_req_handshake:
-	    log_test.info("sending 'certificate-request' type of handshake message instead of 'certificate-verify' type")
-	    client_cert_verify = TLSHandshake(type=TLSHandshakeType.CERTIFICATE_REQUEST)/verify_signature
-	else:
-            client_cert_verify = TLSHandshake(type=TLSHandshakeType.CERTIFICATE_VERIFY)/verify_signature
-	if self.incorrect_tlsrecord_type_cert_req:
-	    log_test.info("sending TLS Record type as ALERT instead of HANDSHAKE in certificate request packet")
-            client_cert_record = TLSRecord(content_type=TLSContentType.ALERT)/client_cert_verify
-	else:
-	    client_cert_record = TLSRecord(content_type=TLSContentType.HANDSHAKE)/client_cert_verify
-        self.pkt_history.append(str(client_cert_verify))
-        #log_test.info('Interface: %s, TLS ctxt: %s' %(self.intf, self.tls_ctx))
-        client_ccs = TLSRecord(version="TLS_1_0")/TLSChangeCipherSpec()
-        enc_handshake_msg = self.get_encrypted_handshake_msg()
-	if self.invalid_content_type:
-            handshake_msg = str(TLSRecord(content_type=self.invalid_content_type)/enc_handshake_msg)
-	else:
-	    handshake_msg = str(TLSRecord(content_type=TLSContentType.HANDSHAKE)/enc_handshake_msg)
-        reqdata = str(TLS.from_records([client_certificate, client_key_ex, client_cert_record, client_ccs]))
-        reqdata += handshake_msg
-        log_test.info("Sending Client Hello TLS Certificate payload of len %d over interface %s" %(len(reqdata), self.intf))
-	if self.dont_send_client_certificate:
-	    log_test.info('\nskipping sending client certificate part')
-	    pass
-	else:
-            status = self.eapFragmentSend(EAP_RESPONSE, self.server_hello_done_eap_id, TLS_LENGTH_INCLUDED,
-                                      payload = reqdata, fragsize = 1024)
-            assert_equal(status, True)
-            self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_CHANGE_CIPHER_SPEC
-
-    def _eapTlsCertReq_delay(self):
-        self.server_hello_done_received = True
-        log_test.info('Sending client certificate request over interface %s', self.intf)
-        rex_pem = re.compile(r'\-+BEGIN[^\-]+\-+(.*?)\-+END[^\-]+\-+', re.DOTALL)
-
-        if self.client_cert:
-           der_cert = rex_pem.findall(self.client_cert)[0].decode("base64")
-           client_certificate_list = TLSHandshake()/TLSCertificateList(
-                                                    certificates=[TLSCertificate(data=x509.X509Cert(der_cert))])
-        else:
-           client_certificate_list = TLSHandshake()/TLSCertificateList(certificates=[])
-
-        client_certificate = TLSRecord(version="TLS_1_0")/client_certificate_list
-	kex_data = self.tls_ctx.get_client_kex_data()
-        client_key_ex_data = TLSHandshake()/kex_data
-        client_key_ex = TLSRecord()/client_key_ex_data
-
-        if self.client_cert:
-           self.load_tls_record(str(client_certificate))
-           self.pkt_history.append(str(client_certificate_list))
-
-        self.load_tls_record(str(client_key_ex))
-        self.pkt_history.append(str(client_key_ex_data))
-        verify_signature = self.get_verify_signature(self.client_priv_key)
-
-	if self.invalid_cert_req_handshake:
-	   log_test.info("Sending 'certificate-request' type of handshake message instead of 'certificate-verify' type")
-	   client_cert_verify = TLSHandshake(type=TLSHandshakeType.CERTIFICATE_REQUEST)/verify_signature
-	else:
-           client_cert_verify = TLSHandshake(type=TLSHandshakeType.CERTIFICATE_VERIFY)/verify_signature
-
-	if self.incorrect_tlsrecord_type_cert_req:
-	   log_test.info("Sending TLS Record type as ALERT instead of HANDSHAKE in certificate request packet")
-           client_cert_record = TLSRecord(content_type=TLSContentType.ALERT)/client_cert_verify
-	else:
-	   client_cert_record = TLSRecord(content_type=TLSContentType.HANDSHAKE)/client_cert_verify
-
-        self.pkt_history.append(str(client_cert_verify))
-        #log_test.info('TLS ctxt: %s' %self.tls_ctx)
-        client_ccs = TLSRecord(version="TLS_1_0")/TLSChangeCipherSpec()
-        enc_handshake_msg = self.get_encrypted_handshake_msg()
-
-	if self.invalid_content_type:
-            handshake_msg = str(TLSRecord(content_type=self.invalid_content_type)/enc_handshake_msg)
-	else:
-	    handshake_msg = str(TLSRecord(content_type=TLSContentType.HANDSHAKE)/enc_handshake_msg)
-        reqdata = str(TLS.from_records([client_certificate, client_key_ex, client_cert_record, client_ccs]))
-        reqdata += handshake_msg
-        log_test.info("Sending Client Hello TLS Certificate payload of len %d over interface %s" %(len(reqdata), self.intf))
-
-	if self.dont_send_client_certificate:
-	   log_test.info('\nSkipping sending client certificate part')
-	   pass
-	else:
-           status = self.eapFragmentSend(EAP_RESPONSE, self.server_hello_done_eap_id, TLS_LENGTH_INCLUDED,
-                                      payload = reqdata, fragsize = 1024)
-           assert_equal(status, True)
-           self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_CHANGE_CIPHER_SPEC
-
-    def _eapTlsChangeCipherSpec(self):
-        def eapol_cb(pkt):
-            r = str(pkt)
-	    log_test.info('Interface %s. Received data in change cipher spec function is %s'%(self.intf, pkt.show()))
-            tls_data = r[self.TLS_OFFSET:]
-            log_test.info('Verifying TLS Change Cipher spec record type %x over interface %s' %(ord(tls_data[0]), self.intf))
-            assert tls_data[0] == self.CHANGE_CIPHER
-            log_test.info('Handshake finished. Sending empty data over interface %s' %self.intf)
-            eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, 0, '')
-            self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-
-        r = self.eapol_scapy_recv(cb = eapol_cb,
-                                  lfilter =
-                                  lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
-        if len(r) > 0:
-            self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_FINISHED
-        else:
-            self.tlsFail()
-            return r
-
-    def _eapTlsFinished(self):
-        self.nextEvent = None
-        def eapol_cb(pkt):
-            log_test.info('Server authentication successfull over interface %s' %self.intf)
-
-        timeout = 5
-        if self.failTest is True:
-            if self.fail_cb is not None:
-                self.fail_cb()
-                return
-            timeout = None ##Wait forever on failure and force testcase timeouts
-
-        self.eapol_scapy_recv(cb = eapol_cb,
-                              lfilter =
-                              lambda pkt: EAP in pkt and pkt[EAP].code == EAP.SUCCESS,
-                              timeout = timeout)
-        self.eapol_logoff()
diff --git a/src/test/utils/EapolAAA.py b/src/test/utils/EapolAAA.py
deleted file mode 100644
index e9094c8..0000000
--- a/src/test/utils/EapolAAA.py
+++ /dev/null
@@ -1,387 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-####  Authentication parameters
-from scapy.all import *
-from scapy_ssl_tls.ssl_tls import *
-from socket import *
-from struct import *
-import os
-import sys
-import binascii
-import shutil
-from nose.tools import assert_equal, assert_not_equal, assert_raises, assert_true
-from CordTestUtils import log_test
-
-USER = "raduser"
-PASS = "radpass"
-WRONG_USER = "XXXX"
-WRONG_PASS = "XXXX"
-NO_USER = ""
-NO_PASS = ""
-DEV = "tap0"
-ETHERTYPE_PAE = 0x888e
-PAE_GROUP_ADDR = "\xff\xff\xff\xff\xff\xff"
-EAPOL_VERSION = 1
-EAPOL_EAPPACKET = 0
-EAPOL_START = 1
-EAPOL_LOGOFF = 2
-EAPOL_KEY = 3
-EAPOL_ASF = 4
-EAP_REQUEST = 1
-EAP_RESPONSE = 2
-EAP_SUCCESS = 3
-EAP_FAILURE = 4
-EAP_TYPE_ID = 1
-EAP_TYPE_MD5 = 4
-EAP_TYPE_MSCHAP = 26
-EAP_TYPE_TLS = 13
-cCertMsg = '\x0b\x00\x00\x03\x00\x00\x00'
-TLS_LENGTH_INCLUDED = 0x80
-TLS_MORE_FRAGMENTS = 0x40
-RADIUS_USER_MAC_START = (0x02 << 40) | (0x03 << 32) | (0x04 << 24) | 1
-RADIUS_USER_MAC_END =  (0x02 << 40) | (0x03 << 32) | (0x04 << 24) | (0xff << 16) | ( 0xff << 8 ) | 0xff
-
-class EapolPacket(object):
-
-    src_mac_map = { 'bcast': 'ff:ff:ff:ff:ff:ff',
-                    'mcast': '01:80:C2:00:00:03',
-                    'zeros': '00:00:00:00:00:00',
-                    'default': None
-                    }
-
-    def __init__(self, intf = 'veth0'):
-        self.intf = intf
-        self.s = None
-        self.max_recv_size = 1600
-
-    def setup(self, src_mac = 'default'):
-        self.s = socket(AF_PACKET, SOCK_RAW, htons(ETHERTYPE_PAE))
-        self.s.bind((self.intf, ETHERTYPE_PAE))
-        self.mymac = self.s.getsockname()[4]
-        mac = None
-        mac_str = None
-        if src_mac == 'random':
-            mac = RandMAC()._fix()
-        elif src_mac in self.src_mac_map:
-            mac = self.src_mac_map[src_mac]
-        if mac is None:
-            mac = self.mymac
-            mac_str = binascii.hexlify(mac)
-        if mac_str is None:
-            mac_str = mac
-        self.llheader = Ether(dst = PAE_GROUP_ADDR, src = mac, type = ETHERTYPE_PAE)
-	log_test.info('llheader packet is %s'%self.llheader.show())
-	log_test.info('source mac of  packet is %s'%mac_str)
-        self.recv_sock = L2Socket(iface = self.intf, type = ETHERTYPE_PAE)
-
-    def cleanup(self):
-        if self.s is not None:
-            self.s.close()
-            self.s = None
-
-    def eapol(self, req_type, payload=""):
-        return EAPOL(version = EAPOL_VERSION, type = req_type)/payload
-
-    def eap(self, code, pkt_id, req_type=0, data=""):
-        return EAP(code = code, id = pkt_id, type = req_type)/data
-
-    def eapFragmentSend(self, code, pkt_id, flags = TLS_LENGTH_INCLUDED, payload = "", fragsize = 1024):
-        req_type = EAP_TYPE_TLS
-        if code in [ EAP_SUCCESS, EAP_FAILURE ]:
-            data = pack("!BBH", code, pkt_id, 4)
-            self.eapol_send(EAPOL_EAPPACKET, data)
-            return True
-
-        if len(payload) <= fragsize:
-            if flags & TLS_LENGTH_INCLUDED:
-                flags_dlen = pack("!BL", flags, len(payload))
-                data = pack("!BBHB", code, pkt_id, 5 + len(flags_dlen) + len(payload), req_type) \
-                       + flags_dlen + payload
-                self.eapol_send(EAPOL_EAPPACKET, data)
-                return True
-            flags_str = pack("!B", flags)
-            data = pack("!BBHB", code, pkt_id, 5+len(flags_str)+len(payload), req_type) + flags_str + payload
-            self.eapol_send(EAPOL_EAPPACKET, data)
-            return True
-
-        fragments = []
-        data = payload[:]
-        frag = 0
-        def eapol_frag_cb(pkt):
-            r = str(pkt)
-            tls_data = r[self.TLS_OFFSET:]
-            frag_data = fragments[frag]
-            ##change packet id in response to match request
-            eap_payload = frag_data[:1] + pack("!B", pkt[EAP].id) + frag_data[2:]
-            self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-
-        while len(data) > 0:
-            data_frag = data[:fragsize]
-            data = data[fragsize:]
-            if frag == 0:
-                ##first frag, include the total length
-                flags_dlen = pack("!BL", TLS_LENGTH_INCLUDED | TLS_MORE_FRAGMENTS, len(payload))
-                fragments.append(pack("!BBHB", code, pkt_id, 5 + len(flags_dlen) + len(data_frag), req_type) \
-                                   + flags_dlen + data_frag)
-            else:
-                if len(data) > 0:
-                    flags = TLS_MORE_FRAGMENTS
-                else:
-                    flags = 0
-                flags_str = pack("!B", flags)
-                fragments.append(pack("!BBHB", code, pkt_id, 5+len(flags_str)+len(data_frag), req_type) + \
-                                   flags_str + data_frag)
-            frag += 1
-
-        frag = 0
-        self.eapol_send(EAPOL_EAPPACKET, fragments[frag])
-        for frag in range(len(fragments)-1):
-            frag += 1
-            r = self.eapol_scapy_recv(cb = eapol_frag_cb,
-                                      lfilter = lambda pkt: EAP in pkt and pkt[EAP].type == EAP_TYPE_TLS and \
-                                          pkt[EAP].code == EAP.REQUEST)
-
-        return True
-
-    def eapTLS(self, code, pkt_id, flags = TLS_LENGTH_INCLUDED, data=""):
-        req_type = EAP_TYPE_TLS
-        if code in [EAP_SUCCESS, EAP_FAILURE]:
-            return pack("!BBH", code, pkt_id, 4)
-        else:
-            if flags & TLS_LENGTH_INCLUDED:
-                flags_dlen = pack("!BL", flags, len(data))
-                return pack("!BBHB", code, pkt_id, 5+len(flags_dlen)+len(data), req_type) + flags_dlen + data
-            flags_str = pack("!B", flags)
-            return pack("!BBHB", code, pkt_id, 5+len(flags_str)+len(data), req_type) + flags_str + data
-
-    def eapTLSFragment(self, code, pkt_id, frag, data="", data_len = 0):
-        req_type = EAP_TYPE_TLS
-        if frag == 0:
-            flags = TLS_LENGTH_INCLUDED | TLS_MORE_FRAGMENTS
-        elif frag > 0:
-            flags = TLS_MORE_FRAGMENTS
-        else:
-            #last fragment
-            flags = 0
-        if data_len == 0:
-            data_len = len(data)
-        if flags & TLS_LENGTH_INCLUDED:
-            flags_dlen = pack("!BL", flags, data_len)
-            return pack("!BBHB", code, pkt_id, 5+len(flags_dlen)+len(data), req_type) + flags_dlen + data
-        flags_str = pack("!B", flags)
-        return pack("!BBHB", code, pkt_id, 5+len(flags_str)+len(data), req_type) + flags_str + data
-
-    def eapol_send(self, eapol_type, eap_payload):
-        return sendp(self.llheader/self.eapol(eapol_type, eap_payload), iface=self.intf)
-
-    def eapol_recv(self):
-        p = self.s.recv(self.max_recv_size)[14:]
-        vers,pkt_type,eapollen  = unpack("!BBH",p[:4])
-        print "Version %d, type %d, len %d" %(vers, pkt_type, eapollen)
-        assert_equal(pkt_type, EAPOL_EAPPACKET)
-        return p[4:]
-
-    def eapol_scapy_recv(self, cb = None, lfilter = None, count = 1, timeout = 10):
-        def eapol_default_cb(pkt): pass
-        if cb is None:
-            cb = eapol_default_cb
-        return sniff(prn = cb, lfilter = lfilter, count = count, timeout = timeout, opened_socket = self.recv_sock)
-
-    def eapol_start(self):
-        eap_payload = self.eap(EAPOL_START, 2)
-        return self.eapol_send(EAPOL_START, eap_payload)
-
-    def eapol_logoff(self):
-        eap_payload = self.eap(EAPOL_LOGOFF, 2)
-        return self.eapol_send(EAPOL_LOGOFF, eap_payload)
-
-    def eapol_id_req(self, pkt_id = 0, user = USER):
-        eap_payload = self.eap(EAP_RESPONSE, pkt_id, EAP_TYPE_ID, user)
-        return self.eapol_send(EAPOL_EAPPACKET, eap_payload)
-
-    def eap_md5_challenge_recv(self,rad_pwd):
-        PASS = rad_pwd
-        print 'Inside EAP MD5 Challenge Exchange'
-        p = self.s.recv(self.max_recv_size)[14:]
-        vers,pkt_type,eapollen  = unpack("!BBH",p[:4])
-        print "EAPOL Version %d, type %d, len %d" %(vers, pkt_type, eapollen)
-        code, pkt_id, eaplen = unpack("!BBH", p[4:8])
-        print "EAP Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[8:9])[0]
-        reqdata = p[9:4+eaplen]
-        print 'Request type is %d' %(reqtype)
-        assert_equal(reqtype, EAP_TYPE_MD5)
-        challenge=pack("!B",pkt_id)+PASS+reqdata[1:]
-        print "Generating md5 challenge for %s" % challenge
-        return (challenge,pkt_id)
-
-    def eap_Status(self):
-        print 'Inside EAP Status'
-        p = self.s.recv(self.max_recv_size)[14:]
-        code, id, eaplen = unpack("!BBH", p[4:8])
-        return code
-
-    @classmethod
-    def eap_invalid_tls_packets_info(self, invalid_field_name = None, invalid_field_value = None):
-        log_test.info( 'Changing invalid field values in tls auth packets' )
-        if invalid_field_name == 'eapolTlsVersion':
-           global EAPOL_VERSION
-           log_test.info( 'Changing invalid field values in tls auth packets====== version changing' )
-           EAPOL_VERSION = invalid_field_value
-        if invalid_field_name == 'eapolTlsType':
-           global EAP_TYPE_TLS
-           log_test.info( 'Changing invalid field values in tls auth packets====== EAP TYPE TLS changing' )
-           EAP_TYPE_TLS = invalid_field_value
-        if invalid_field_name == 'eapolTypeID':
-           global EAP_TYPE_ID
-           log_test.info( 'Changing invalid field values in tls auth packets====== EAP TYPE TLS changing' )
-           EAP_TYPE_ID = invalid_field_value
-        if invalid_field_name == 'eapolResponse':
-           global EAP_RESPONSE
-           log_test.info( 'Changing invalid field values in tls auth packets====== EAP TYPE TLS changing' )
-           EAP_RESPONSE = invalid_field_value
-
-
-    @classmethod
-    def eap_tls_packets_field_value_replace(self, invalid_field_name = None):
-        log_test.info( 'Changing invalid field values in tls auth packets' )
-        if invalid_field_name == 'eapolTlsVersion':
-           global EAPOL_VERSION
-           EAPOL_VERSION = 1
-           log_test.info( 'Changing invalid field values in tls auth packets====== version changing' )
-        if invalid_field_name == 'eapolTlsType':
-           global EAP_TYPE_TLS
-           EAP_TYPE_TLS = 13
-           log_test.info( 'Changing invalid field values in tls auth packets====== version changing' )
-        if invalid_field_name == 'eapolTypeID':
-           global EAP_TYPE_ID
-           EAP_TYPE_ID = 1
-           log_test.info( 'Changing invalid field values in tls auth packets====== version changing' )
-        if invalid_field_name == 'eapolResponse':
-           global EAP_RESPONSE
-           EAP_RESPONSE = 2
-           log_test.info( 'Changing invalid field values in tls auth packets====== version changing' )
-
-def get_radius_macs(num, start = 0, end = 0):
-    """Generate radius server mac addresses"""
-    """Scope to generate 256*256*256 mac addresses"""
-    if start == 0 or end == 0:
-        s = (0x00 << 40) | (0x02 << 32) | ( 0x03 << 24) | (1)
-        e = (0x00 << 40) | (0x02 << 32) | ( 0x03 << 24) | (0xff << 16) | (0xff << 8) | (0xff)
-    else:
-        s = start
-        e = end
-    n_macs = []
-    for v in xrange(s, e):
-        mask = (v & 0xff0000) == 0xff0000 or \
-               (v & 0x00ff00) == 0x00ff00 or \
-               (v & 0x0000ff) == 0x0000ff
-        if mask:
-            continue
-        n_macs.append(v)
-        if len(n_macs) == num:
-            break
-
-    def n_to_mac(n):
-        n_tuple = ( (n >> 40) & 0xff,
-                    (n >> 32) & 0xff,
-                    (n >> 24) & 0xff,
-                    (n >> 16) & 0xff,
-                    (n >> 8)  & 0xff,
-                    n & 0xff,
-        )
-        return '%02x:%02x:%02x:%02x:%02x:%02x' %(n_tuple)
-
-    #convert the number to macs
-    return map(n_to_mac, n_macs)
-
-def get_radius_networks(num):
-    PORT_SUBNET_START = '12.0.0.0'
-    PORT_SUBNET_MASK = '/24'
-    PORT_SUBNET_END = '220.0.0.0'
-    port_start_list = map(lambda ip: int(ip), PORT_SUBNET_START.split('.'))
-    port_end_list = map(lambda ip: int(ip), PORT_SUBNET_END.split('.'))
-    port_subnet_start = (port_start_list[0] << 24) | ( port_start_list[1] << 16 ) | ( port_start_list[2] << 8 ) | 0
-    port_subnet_end = (port_end_list[0] << 24) | ( port_end_list[1] << 16 ) | ( port_end_list[2] << 8 ) | 0
-    mask = int(PORT_SUBNET_MASK[1:])
-    net_list = []
-    for n in xrange(port_subnet_start, port_subnet_end, 256):
-        subnet = ((n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, 0, mask)
-        prefix = subnet[:3]
-        gw = prefix + (1,)
-        subnet_s = '{}.{}.{}.{}/{}'.format(*subnet)
-        prefix_s = '{}.{}.{}'.format(*prefix)
-        gw_s = '{}.{}.{}.{}'.format(*gw)
-        net_list.append((prefix_s, subnet_s, gw_s))
-        if len(net_list) >= num:
-            break
-
-    return net_list
-
-def get_radius_user_file():
-    cur_dir = os.path.dirname(os.path.realpath(__file__))
-    radius_authorize = 'setup/radius-config/freeradius/mods-config/files/authorize'
-    radius_user_file = os.path.join(cur_dir, '..', *radius_authorize.split('/'))
-    return radius_user_file
-
-def radius_add_users(num):
-    global RADIUS_USER_MAC_START, RADIUS_USER_MAC_END
-    template = '''
-%s Cleartext-Password := "radpass"
-\tReply-Message := "Hello, %%{User-Name}"
-'''
-    radius_user_file = get_radius_user_file()
-    if not os.access(radius_user_file, os.F_OK):
-        return False
-    mac_start = RADIUS_USER_MAC_START
-    mac_end =   RADIUS_USER_MAC_END
-    macs = get_radius_macs(num, start = mac_start, end = mac_end)
-    save_file = '{}.save'.format(radius_user_file)
-    new_file = '{}.new'.format(radius_user_file)
-    shutil.copy(radius_user_file, save_file)
-    with open(radius_user_file, 'r') as f:
-        lines = f.readlines()
-    for m in macs:
-        lines.append(template %(m))
-    with open(new_file, 'w') as f:
-        f.writelines(lines)
-    os.rename(new_file, radius_user_file)
-    return True
-
-def radius_restore_users():
-    radius_user_file = get_radius_user_file()
-    save_file = '{}.save'.format(radius_user_file)
-    if not os.access(save_file, os.F_OK):
-        return False
-    os.rename(save_file, radius_user_file)
-    return True
diff --git a/src/test/utils/Enum.py b/src/test/utils/Enum.py
deleted file mode 100644
index b748fd3..0000000
--- a/src/test/utils/Enum.py
+++ /dev/null
@@ -1,145 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#!python
-import copy
-import pprint
-pf = pprint.pformat
-
-class EnumException(Exception):
-    pass
-class Enumeration(object):
-    def __init__(self, name, enumList, valuesAreUnique=False, startValue=0):
-        self.__doc__ = name
-        self.uniqueVals = valuesAreUnique
-        self.lookup = {}
-        self.reverseLookup = {}
-
-        self._addEnums(enumList, startValue)
-
-    def _addEnums(self, enumList, startValue):
-        i = startValue
-        for x in enumList:
-            if type(x) is tuple:
-                try:
-                    x, i = x
-                except ValueError:
-                    raise EnumException, "tuple doesn't have 2 items: %r" % (x,)
-            if type(x) is not str:
-                raise EnumException, "enum name is not a string: %r" % (x,)
-            if x in self.lookup:
-                raise EnumException, "enum name is not unique: %r" % (x,)
-            if self.uniqueVals and i in self.reverseLookup:
-                raise EnumException, "enum value %r not unique for %r" % (i, x)
-            self.lookup[x] = i
-            self.reverseLookup[i] = x
-
-            if type(i) is int:
-                i = i + 1
-
-        values = self.lookup.values()
-        self.first_int  = min(values)
-        self.last_int   = max(values)
-        self.first_name = self.reverseLookup[self.first_int]
-        self.last_name  = self.reverseLookup[self.last_int]
-
-    def __str__(self):
-        return pf(self.lookup)
-
-    def __repr__(self):
-        return pf(self.lookup)
-
-    def __eq__(self, other):
-        return isinstance(other, Enumeration) and self.__doc__ == other.self.__doc__ and 0 == cmp(self.lookup, other.lookup)
-
-    def extend(self, enumList):
-        '''
-        Extend an existing enumeration with additional values.
-        '''
-        startValue = self.last_int + 1
-        self._addEnums(enumList, startValue)
-
-    def __getattr__(self, attr):
-        try: return self.lookup[attr]
-        except KeyError: raise AttributeError, attr
-
-    def whatis(self,value):
-        return self.reverseLookup[value]
-
-    def toInt(self, strval):
-        return self.lookup.get(strval)
-
-    def toStr(self,value):
-        return self.reverseLookup.get(value,"Value undefined: %s" % str(value))
-
-    def range(self):
-        keys = copy.copy(self.reverseLookup.keys())
-        keys.sort()
-        return keys
-
-    def valid(self, value):
-        return value in self.reverseLookup.keys()
-
-    def invalid(self, value):
-        return value not in self.reverseLookup.keys()
-
-    def vrange(self):
-        ''' returns an iterator of the enumeration values '''
-        return copy.copy(self.lookup.keys())
-
-    def first_asInt(self):
-        return self.first_int
-
-    def last_asInt(self):
-        return self.last_int
-
-    def first_asName(self):
-        return self.first_name
-
-    def last_asName(self):
-        return self.last_name
-
-if __name__ == '__main__':
-    #lets test things
-
-    testEnum0 = Enumeration("EnumName0",
-        ("Value0","Value1","Value2","Value3","Value4","Value5","Value6"))
-
-    print testEnum0.Value6
-
-    if testEnum0.__getattr__("Value6") == testEnum0.Value6:
-        print "Looks good"
-
-    # This is a bad case, we inserted a non-string value which should case
-    # an exception.
-#    testEnum1 = Enumeration("EnumName1",
-#        ("Value0","Value1","Value2",1,"Value3","Value4","Value5","Value6"))
-
diff --git a/src/test/utils/Fabric.py b/src/test/utils/Fabric.py
deleted file mode 100644
index 97761f6..0000000
--- a/src/test/utils/Fabric.py
+++ /dev/null
@@ -1,164 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os, sys
-import json
-import platform
-import subprocess
-from apiclient.maas_client import MAASOAuth, MAASDispatcher, MAASClient
-from paramiko import SSHClient, WarningPolicy, AutoAddPolicy
-
-class FabricMAAS(object):
-    CORD_TEST_HOST = '172.17.0.1'
-    head_node = os.getenv('HEAD_NODE', CORD_TEST_HOST)
-    maas_url = 'http://{}/MAAS/api/1.0/'.format(head_node)
-
-    def __init__(self, api_key = None, url = maas_url):
-        if api_key == None:
-            self.api_key = self.get_api_key()
-        else:
-            self.api_key = api_key
-        self.auth = MAASOAuth(*self.api_key.split(':'))
-        self.url = url
-        self.client = MAASClient(self.auth, MAASDispatcher(), self.url)
-
-    @classmethod
-    def get_api_key(cls):
-        api_key = os.getenv('MAAS_API_KEY', None)
-        if api_key:
-            return api_key
-        cmd = ['maas-region-admin', 'apikey', '--username=cord']
-        try:
-            p = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
-        except:
-            return 'UNKNOWN'
-        out, err = p.communicate()
-        if err:
-            raise Exception('Cannot get api key for MAAS')
-        return out.strip()
-
-    def get_node_list(self):
-        nodes = self.client.get(u'nodes/', 'list').read()
-        node_list = json.loads(nodes)
-        hosts = [ self.head_node ] +  map(lambda n: n['hostname'], node_list)
-        return hosts
-
-class Fabric(object):
-    entropy = 1
-    simulation = False
-    def __init__(self, node_list, user = 'ubuntu', passwd = 'ubuntu', key_file = None, verbose = False):
-        self.cur_node = None
-        if Fabric.simulation:
-            self.cur_node = FabricMAAS.head_node
-        self.node_list = node_list
-        self.users = [ user ]
-        if 'vagrant' not in self.users:
-            self.users.append('vagrant')
-        if 'ubuntu' not in self.users:
-            self.users.append('ubuntu')
-        self.passwd = passwd
-        self.key_file = key_file
-        self.verbose = verbose
-        self.client = SSHClient()
-        self.client.load_system_host_keys()
-        self.client.set_missing_host_key_policy(AutoAddPolicy())
-
-    def run_cmd(self, node, neighbor, cmd, simulation = False):
-        if simulation is True:
-            Fabric.entropy = Fabric.entropy ^ 1
-            return bool(Fabric.entropy)
-        if node == self.cur_node:
-            res = os.system(cmd)
-            return res == 0
-        ssh_user = None
-        for user in self.users:
-            try:
-                self.client.connect(node, username = user, key_filename = self.key_file, timeout = 5)
-                ssh_user = user
-                break
-            except:
-                continue
-
-        if ssh_user is None:
-            print('Unable to ssh to node %s for neighbor %s' %(node, neighbor))
-            return False
-        else:
-            if self.verbose:
-                print('ssh connection to node %s with user %s' %(node, ssh_user))
-        channel = self.client.get_transport().open_session()
-        channel.exec_command(cmd)
-        status = channel.recv_exit_status()
-        channel.close()
-        if self.verbose:
-            print('Cmd %s returned with status %d on node %s for neighbor %s' %(cmd, status, node, neighbor))
-        return status == 0
-
-    def ping_neighbor(self, node, neighbor):
-        cmd = 'ping -c 1 -w 2 {}'.format(neighbor)
-        return self.run_cmd(node, neighbor, cmd, Fabric.simulation)
-
-    def ping_neighbors(self):
-        result_map = []
-        for n in self.node_list:
-            for adj in self.node_list:
-                if adj == n:
-                    continue
-                res = self.ping_neighbor(n, adj)
-                result_map.append((n,adj,res))
-
-        ##report
-        if self.verbose:
-            for node, neighbor, res in result_map:
-                print('Ping from node %s to neighbor %s returned %s\n' %(node, neighbor, res))
-
-        failed_nodes = filter(lambda f: f[2] == False, result_map)
-        return failed_nodes
-
-if __name__ == '__main__':
-    if len(sys.argv) > 1:
-        nodes_file = sys.argv[1]
-        with open(nodes_file, 'r') as fd:
-            nodes = json.load(fd)
-        node_list = nodes['node_list']
-    else:
-        m = FabricMAAS()
-        node_list = m.get_node_list()
-        print('Node list: %s' %node_list)
-    key_file = os.getenv('SSH_KEY_FILE', None)
-    Fabric.simulation = True if key_file is None else False
-    fab = Fabric(node_list, verbose = True, key_file = key_file)
-    failed_nodes = fab.ping_neighbors()
-    if failed_nodes:
-        print('Failed nodes: %s' %failed_nodes)
-        for node, neighbor, _ in failed_nodes:
-            print('Ping from node %s to neighbor %s Failed' %(node, neighbor))
-    else:
-        print('Fabric test between nodes %s is successful' %node_list)
diff --git a/src/test/utils/IGMP.py b/src/test/utils/IGMP.py
deleted file mode 100644
index 872ab38..0000000
--- a/src/test/utils/IGMP.py
+++ /dev/null
@@ -1,269 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from socket import *
-from struct import *
-from scapy.all import *
-from itertools import *
-
-IGMP_TYPE_MEMBERSHIP_QUERY     = 0x11
-IGMP_TYPE_V3_MEMBERSHIP_REPORT = 0x22
-IGMP_TYPE_V3_MEMBERSHIP_REPORT_NEGATIVE = 0xdd
-IGMP_TYPE_V1_MEMBERSHIP_REPORT = 0x12
-IGMP_TYPE_V2_MEMBERSHIP_REPORT = 0x16
-IGMP_TYPE_V2_LEAVE_GROUP       = 0x17
-
-IGMP_V3_GR_TYPE_INCLUDE           = 0x01
-IGMP_V3_GR_TYPE_INCLUDE_NEGATIVE  = 0xaa
-IGMP_V3_GR_TYPE_EXCLUDE           = 0x02
-IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE = 0x03
-IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE = 0x04
-IGMP_V3_GR_TYPE_ALLOW_NEW         = 0x05
-IGMP_V3_GR_TYPE_BLOCK_OLD         = 0x06
-
-"""
-IGMPV3_ALL_ROUTERS = '224.0.0.22'
-IGMPv3 = 3
-IP_SRC = '1.2.3.4'
-ETHERTYPE_IP = 0x0800
-IGMP_DST_MAC = "01:00:5e:00:01:01"
-IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-"""
-
-
-class IGMPv3gr(Packet):
-    """IGMPv3 Group Record, used in membership report"""
-
-    name = "IGMPv3gr"
-
-    igmp_v3_gr_types = {
-        IGMP_V3_GR_TYPE_INCLUDE: "Include Mode",
-        IGMP_V3_GR_TYPE_INCLUDE_NEGATIVE: "Include Mode in negative scenario",
-        IGMP_V3_GR_TYPE_EXCLUDE: "Exclude Mode",
-        IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE: "Change to Include Mode",
-        IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE: "Change to Exclude Mode",
-        IGMP_V3_GR_TYPE_ALLOW_NEW: "Allow New Sources",
-        IGMP_V3_GR_TYPE_BLOCK_OLD: "Block Old Sources"
-    }
-
-    fields_desc = [
-        ByteEnumField("rtype", IGMP_V3_GR_TYPE_INCLUDE, igmp_v3_gr_types),
-        ByteField("aux_data_len", 0),
-        FieldLenField("numsrc", None, count_of="sources"),
-        IPField("mcaddr", "0.0.0.0"),
-        FieldListField("sources", None, IPField("src", "0.0.0.0"), "numsrc")
-    ]
-
-    def post_build(self, pkt, payload):
-        pkt += payload
-        if self.aux_data_len != 0:
-            print("WARNING: Auxiliary Data Length must be zero (0)")
-        return pkt
-
-
-class IGMPv3(Packet):
-
-    name = "IGMPv3"
-
-    igmp_v3_types = {
-        IGMP_TYPE_MEMBERSHIP_QUERY: "Membership Query",
-        IGMP_TYPE_V3_MEMBERSHIP_REPORT: " Version 3 Mebership Report",
-        IGMP_TYPE_V2_MEMBERSHIP_REPORT: " Version 2 Mebership Report",
-        IGMP_TYPE_V1_MEMBERSHIP_REPORT: " Version 1 Mebership Report",
-        IGMP_TYPE_V2_LEAVE_GROUP: "Version 2 Leave Group"
-    }
-
-    fields_desc = [
-        ByteEnumField("type", IGMP_TYPE_MEMBERSHIP_QUERY, igmp_v3_types),
-        ByteField("max_resp_code", 0),
-        XShortField("checksum", None),
-        #IPField("group_address", "0.0.0.0"),
-
-        # membership query fields
-        ConditionalField(IPField("gaddr", "0.0.0.0"), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(BitField("resv", 0, 4), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(BitField("s", 0, 1), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(BitField("qrv", 0, 3), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(ByteField("qqic", 0), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(FieldLenField("numsrc", None, count_of="srcs"), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-        ConditionalField(FieldListField("srcs", None, IPField("src", "0.0.0.0"), "numsrc"), lambda pkt: pkt.type == IGMP_TYPE_MEMBERSHIP_QUERY),
-
-        # membership report fields
-        ConditionalField(ShortField("resv2", 0), lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT),
-        ConditionalField(FieldLenField("numgrp", None, count_of="grps"), lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT),
-        ConditionalField(PacketListField("grps", [], IGMPv3gr), lambda pkt: pkt.type == IGMP_TYPE_V3_MEMBERSHIP_REPORT)
-
-        # TODO: v2 and v3 membership reports?
-
-    ]
-
-    def post_build(self, pkt, payload):
-
-        pkt += payload
-
-        if self.type in [IGMP_TYPE_V3_MEMBERSHIP_REPORT,]: # max_resp_code field is reserved (0)
-            mrc = 0
-        else:
-            mrc = self.encode_float(self.max_resp_code)
-        pkt = pkt[:1] + chr(mrc) + pkt[2:]
-
-        if self.checksum is None:
-            chksum = checksum(pkt)
-            pkt = pkt[:2] + chr(chksum >> 8) + chr(chksum & 0xff) + pkt[4:]
-
-        return pkt
-
-    def encode_float(self, value):
-        """Encode max response time value per RFC 3376."""
-        if value < 128:
-            return value
-        if value > 31743:
-            return 255
-        exp = 0
-        value >>= 3
-        while value > 31:
-            exp += 1
-            value >>= 1
-        return 0x80 | (exp << 4) | (value & 0xf)
-
-
-    def decode_float(self, code):
-        if code < 128:
-            return code
-        mant = code & 0xf
-        exp = (code >> 4) & 0x7
-        return (mant | 0x10) << (exp + 3)
-
-    @staticmethod
-    def is_valid_mcaddr(ip):
-        byte1 = atol(ip) >> 24 & 0xff
-        return (byte1 & 0xf0) == 0xe0
-
-    @staticmethod
-    def fixup(pkt, invalid_ttl = None):
-        """Fixes up the underlying IP() and Ether() headers."""
-        assert pkt.haslayer(IGMPv3), "This packet is not an IGMPv4 packet; cannot fix it up"
-
-        igmp = pkt.getlayer(IGMPv3)
-
-        if pkt.haslayer(IP):
-            ip = pkt.getlayer(IP)
-            if invalid_ttl is None:
-               ip.ttl = 1
-            else:
-               ip.ttl = 20
-            ip.proto = 2
-            ip.tos = 0xc0
-            ip.options = [IPOption_Router_Alert()]
-
-            if igmp.type == IGMP_TYPE_MEMBERSHIP_QUERY:
-                if igmp.gaddr == "0.0.0.0":
-                    ip.dst = "224.0.0.1"
-                else:
-                    assert IGMPv3.is_valid_mcaddr(igmp.gaddr), "IGMP membership query with invalid mcast address"
-                    ip.dst = igmp.gaddr
-
-            elif igmp.type == IGMP_TYPE_V2_LEAVE_GROUP and IGMPv3.is_valid_mcaddr(igmp.gaddr):
-                ip.dst = "224.0.0.2"
-
-            elif (igmp.type in (IGMP_TYPE_V1_MEMBERSHIP_REPORT, IGMP_TYPE_V2_MEMBERSHIP_REPORT) and
-                  IGMPv3.is_valid_mcaddr(igmp.gaddr)):
-                ip.dst = igmp.gaddr
-
-           # We do not need to fixup the ether layer, it is done by scapy
-           #
-           # if pkt.haslayer(Ether):
-           #     eth = pkt.getlayer(Ether)
-           #     ip_long = atol(ip.dst)
-           #     ether.dst = '01:00:5e:%02x:%02x:%02x' % ( (ip_long >> 16) & 0x7f, (ip_long >> 8) & 0xff, ip_long & 0xff )
-
-
-        return pkt
-
-
-bind_layers(IP,       IGMPv3,   frag=0, proto=2, ttl=1, tos=0xc0)
-bind_layers(IGMPv3,   IGMPv3gr, frag=0, proto=2)
-bind_layers(IGMPv3gr, IGMPv3gr, frag=0, proto=2)
-
-
-if __name__ == "__main__":
-
-    print("test float encoding")
-    from math import log
-    max_expected_error = 1.0 / (2<<3) # four bit precision
-    p = IGMPv3()
-    for v in range(0, 31745):
-        c = p.encode_float(v)
-        d = p.decode_float(c)
-        rel_err = float(v-d)/v if v!=0 else 0.0
-        assert rel_err <= max_expected_error
-
-    print("construct membership query - general query")
-    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120)
-    hexdump(str(mq))
-
-    print("construct membership query - group-specific query")
-    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120, gaddr="224.0.0.1")
-    hexdump(str(mq))
-
-    print("construct membership query - group-and-source-specific query")
-    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY, max_resp_code=120, gaddr="224.0.0.1")
-    mq.srcs = ['1.2.3.4', '5.6.7.8']
-    hexdump(str(mq))
-
-    print("fixup")
-    mq = IGMPv3(type=IGMP_TYPE_MEMBERSHIP_QUERY)
-    mq.srcs = ['1.2.3.4', '5.6.7.8']
-    pkt = Ether() / IP() / mq
-    print("before fixup:")
-    hexdump(str(pkt))
-
-    print("after fixup:")
-
-    IGMPv3.fixup(pkt,'no')
-    hexdump(str(pkt))
-
-    print("construct v3 membership report - join a single group")
-    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30, gaddr="224.0.0.1")
-    mr.grps = [IGMPv3gr( rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.30")]
-    hexdump(mr)
-
-    print("construct v3 membership report - join two groups")
-    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30, gaddr="224.0.0.1")
-    mr.grps = [
-        IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.30"),
-        IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr="229.10.20.31")
-    ]
-    hexdump(mr)
-
-    print("construct v3 membership report - leave a group")
-    mr = IGMPv3(type=IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30, gaddr="224.0.0.1")
-    mr.grps = [IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr="229.10.20.30")]
-    hexdump(mr)
-
-    print("all ok")
diff --git a/src/test/utils/McastTraffic.py b/src/test/utils/McastTraffic.py
deleted file mode 100644
index cc18e2d..0000000
--- a/src/test/utils/McastTraffic.py
+++ /dev/null
@@ -1,83 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import threading
-import sys
-import os
-import time
-import monotonic
-from scapy.all import *
-
-class McastTraffic(threading.Thread):
-    DST_MAC_DEFAULT = '01:00:5e:00:01:01'
-    SRC_MAC_DEFAULT = '02:88:b4:e4:90:77'
-    SRC_IP_DEFAULT = '1.2.3.4'
-    SEND_STATE = 1
-    RECV_STATE = 2
-
-    def __init__(self, addrs, iface = 'eth0', dst_mac = DST_MAC_DEFAULT, src_mac = SRC_MAC_DEFAULT,
-                 src_ip = SRC_IP_DEFAULT, cb = None, arg = None):
-        threading.Thread.__init__(self)
-        self.addrs = addrs
-        self.iface = iface
-        self.dst_mac = dst_mac
-        self.src_mac = src_mac
-        self.src_ip = src_ip
-        self.cb = cb
-        self.arg = arg
-        self.state = self.SEND_STATE | self.RECV_STATE
-
-    def run(self):
-        eth = Ether(dst = self.dst_mac, src = self.src_mac)
-        while self.state & self.SEND_STATE:
-            for addr in self.addrs:
-                #data = repr(time.time())
-                data = repr(monotonic.monotonic())
-                ip = IP(dst = addr, src = self.src_ip)
-                sendp(eth/ip/data, iface = self.iface)
-            if self.cb:
-                self.cb(self.arg)
-
-    def stop(self):
-        self.state = 0
-
-    def stopReceives(self):
-        self.state &= ~self.RECV_STATE
-
-    def stopSends(self):
-        self.state &= ~self.SEND_STATE
-
-    def isRecvStopped(self):
-        return False if self.state & self.RECV_STATE else True
-
-    def isSendStopped(self):
-        return False if self.state & self.SEND_STATE else True
-
diff --git a/src/test/utils/OltConfig.py b/src/test/utils/OltConfig.py
deleted file mode 100644
index 71ac056..0000000
--- a/src/test/utils/OltConfig.py
+++ /dev/null
@@ -1,184 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import json
-##load the olt config
-
-class OltConfig:
-    def __init__(self, olt_conf_file = ''):
-        if not olt_conf_file:
-            self.olt_conf_file = os.getenv('OLT_CONFIG')
-        else:
-            self.olt_conf_file = olt_conf_file
-        try:
-            self.olt_handle = open(self.olt_conf_file, 'r')
-            self.olt_conf = json.load(self.olt_handle)
-            self.olt_conf['olt'] = True
-        except:
-            self.olt_handle = None
-            self.olt_conf = {}
-            self.olt_conf['olt'] = False
-
-    def on_olt(self):
-        return self.olt_conf['olt'] is True
-
-    def olt_port_map(self):
-        if self.on_olt() and self.olt_conf.has_key('port_map'):
-            port_map = {}
-            port_map['ponsim'] = self.olt_conf['port_map'].has_key('ponsim')
-            if self.olt_conf['port_map'].has_key('switches'):
-                port_map['switches'] = self.olt_conf['port_map']['switches']
-            else:
-                port_map['switches'] = []
-                nr_switches = 1
-                if self.olt_conf['port_map'].has_key('nr_switches'):
-                    nr_switches = int(self.olt_conf['port_map']['nr_switches'])
-                for sw in xrange(nr_switches):
-                    switch = 'br-int{}'.format(sw+1) if sw > 0 else 'br-int'
-                    port_map['switches'].append(switch)
-            #if we have a host interface enabled, invalidate the switches config
-            if self.olt_conf['port_map'].has_key('host'):
-                #if host interface is specified, then use the host instead of ovs switch
-                port_map['host'] = self.olt_conf['port_map']['host']
-                port_map['switches'] = [ port_map['host'] ] + port_map['switches']
-            else:
-                port_map['host'] = port_map['switches'][0]
-            nr_switches = len(port_map['switches'])
-            port_map['switch_port_list'] = []
-            if self.olt_conf['port_map'].has_key('ports'):
-                port_map['ports'] = self.olt_conf['port_map']['ports']
-                num_ports = len(port_map['ports'])
-                port_map['switch_port_list'].append( (port_map['switches'][0], port_map['ports']) )
-                index = 1
-                for switch in port_map['switches'][1:]:
-                    port_start = index * num_ports * 2
-                    port_end = port_start + num_ports * 2
-                    index += 1
-                    port_list = []
-                    for port in xrange(port_start, port_end, 2):
-                        port_name = 'veth{}'.format(port)
-                        port_map['ports'].append(port_name)
-                        port_list.append(port_name)
-                    port_map['switch_port_list'].append( (switch, port_list) )
-            else:
-                port_map['ports'] = []
-                num_ports = int(self.olt_conf['port_map']['num_ports'])
-                for sw in xrange(nr_switches):
-                    port_list = []
-                    switch = port_map['switches'][sw]
-                    port_start = sw * num_ports * 2
-                    port_end = port_start + num_ports * 2
-                    for port in xrange(port_start, port_end, 2):
-                        port_name = 'veth{}'.format(port)
-                        port_map['ports'].append(port_name)
-                        port_list.append(port_name)
-                    port_map['switch_port_list'].append( (switch, port_list) )
-            ##also add dhcprelay ports. We add as many relay ports as subscriber ports
-            port_map['num_ports'] = num_ports
-            relay_ports = num_ports
-            port_map['relay_ports'] = []
-            port_map['switch_relay_port_list'] = []
-            port_map['radius_ports'] = []
-            port_map['switch_radius_port_list'] = []
-            for sw in xrange(nr_switches):
-                port_list = []
-                switch = port_map['switches'][sw]
-                port_start = (nr_switches + sw) * relay_ports * 2
-                port_end = port_start + relay_ports * 2
-                for port in xrange(port_start, port_end, 2):
-                    port_name = 'veth{}'.format(port)
-                    port_map['relay_ports'].append(port_name)
-                    port_list.append(port_name)
-                port_map['switch_relay_port_list'].append( (switch, port_list) )
-            for sw in xrange(nr_switches):
-                switch = port_map['switches'][sw]
-                if not switch.startswith('br-int'):
-                    continue
-                port_name = 'veth{}'.format(port_end)
-                port_list = [ port_name ]
-                port_map['switch_radius_port_list'].append( (switch, port_list) )
-                port_map['radius_ports'].append(port_name)
-                port_end += 2
-            port_num = 1
-            port_map['uplink'] = int(self.olt_conf['uplink'])
-            port_map['wan'] = None
-            if self.olt_conf.has_key('wan'):
-                port_map['wan'] = self.olt_conf['wan']
-            port_list = []
-            ##build the port map and inverse port map
-            for sw in xrange(nr_switches):
-                sw_portnum = 1
-                switch, ports = port_map['switch_port_list'][sw]
-                uplink = sw * num_ports + port_map['uplink']
-                port_map[switch] = {}
-                port_map[switch]['uplink'] = uplink
-                for p in ports:
-                    port_map[port_num] = p
-                    port_map[p] = port_num
-                    if sw_portnum != port_map['uplink']:
-                        #create tx, rx map
-                        port_list.append( (uplink, port_num) )
-                    port_num += 1
-                    sw_portnum += 1
-            ##build the port and inverse map for relay ports
-            for port in port_map['relay_ports']:
-                port_map[port_num] = port
-                port_map[port] = port_num
-                port_num += 1
-            for port in port_map['radius_ports']:
-                port_map[port_num] = port
-                port_map[port] = port_num
-                port_num += 1
-            port_map['start_vlan'] = 0
-            if self.olt_conf['port_map'].has_key('start_vlan'):
-                port_map['start_vlan'] = int(self.olt_conf['port_map']['start_vlan'])
-
-            return port_map, port_list
-        else:
-            return None, None
-
-    def olt_device_data(self):
-        if self.on_olt():
-            accessDeviceDict = {}
-            accessDeviceDict['uplink'] = str(self.olt_conf['uplink'])
-            accessDeviceDict['vlan'] = str(self.olt_conf['vlan'])
-            return accessDeviceDict
-        return None
-
-    def get_vcpes(self):
-        if self.on_olt():
-            if self.olt_conf.has_key('vcpe'):
-                return self.olt_conf['vcpe']
-        return []
-
-    def get_vcpes_by_type(self, service):
-        return filter(lambda vcpe: vcpe['type'].lower() == service.lower(), self.get_vcpes())
diff --git a/src/test/utils/OnboardingServiceUtils.py b/src/test/utils/OnboardingServiceUtils.py
deleted file mode 100644
index 1f2278b..0000000
--- a/src/test/utils/OnboardingServiceUtils.py
+++ /dev/null
@@ -1,308 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import shutil
-import re
-from novaclient import client as nova_client
-import novaclient.v1_1.client as novaclient
-from SSHTestAgent import SSHTestAgent
-from CordTestUtils import *
-from CordTestUtils import log_test as log
-
-log.setLevel('INFO')
-
-class OnboardingServiceUtils(object):
-
-    @classmethod
-    def setUp(cls):
-        pass
-
-    @classmethod
-    def tearDown(cls):
-        pass
-
-    '''
-    @method: get_nova_credentials_v2
-    @Description: Get nova credentials
-    @params:
-    returns credential from env
-    '''
-    @classmethod
-    def get_nova_credentials_v2(cls):
-        credential = {}
-        credential['username'] = os.environ['OS_USERNAME']
-        credential['api_key'] = os.environ['OS_PASSWORD']
-        credential['auth_url'] = os.environ['OS_AUTH_URL']
-        credential['project_id'] = os.environ['OS_TENANT_NAME']
-        return credential
-
-    '''
-    @method: get_compute_nodes
-    @Description: Get the list of compute nodes
-    @params:
-    returns  node list
-    '''
-    @classmethod
-    def get_compute_nodes(cls):
-        credentials = cls.get_nova_credentials_v2()
-        nvclient = nova_client.Client('2', **credentials)
-        return nvclient.hypervisors.list()
-
-    '''
-    @method: get_exampleservices
-    @Description: Get list of exampleservice's running in compute node
-    @params: status of exampleservice
-    returns exampleservice wrappers
-    '''
-    @classmethod
-    def get_exampleservices(cls, active = True):
-        credentials = cls.get_nova_credentials_v2()
-        nvclient = nova_client.Client('2', **credentials)
-        exampleservices = nvclient.servers.list(search_opts = {'all_tenants': 1})
-        if active is True:
-            exampleservices = filter(lambda exampleservice: exampleservice.status == 'ACTIVE', exampleservices)
-        exampleservice_wrappers = []
-        for exampleservice in exampleservices:
-            exampleservice_wrappers.append(ExampleSeviceWrapper(exampleservice))
-        return exampleservice_wrappers
-
-    '''
-    @method: health_check
-    @Description: Check if exampleservices are reachable
-    @params:
-    returns True
-    '''
-    @classmethod
-    def health_check(cls):
-        '''Returns 0 if all active exampleservices are reachable through the compute node'''
-        exampleservices = cls.get_exampleservices()
-        exampleservice_status = []
-        for exampleservice in exampleservices:
-            exampleservice_status.append(exampleservice.get_health())
-        unreachable = filter(lambda st: st == False, exampleservice_status)
-        return len(unreachable) == 0
-
-    def make_veth_pairs(self):
-
-        def check_iface(iface):
-            return os.system('ip link show {}'.format(iface)) == 0
-
-        def make_veth(iface):
-            os.system('ip link add type veth')
-            os.system('ip link set {} up'.format(iface))
-            peer = iface[:len('veth')] + str(int(iface[len('veth'):]) + 1)
-            os.system('ip link set {} up'.format(peer))
-            assert has_iface(iface)
-
-        for iface_number in (0, 2):
-            iface = 'veth{}'.format(iface_number)
-            if not check_iface(iface):
-                make_veth(iface)
-                yield asleep(2)
-
-    def source_env(self):
-        a_dir = os.path.abspath(os.path.dirname(__file__))
-        res = os.system('cd {}'.format(a_dir))
-        assert res == 0
-
-        # set the env
-        command = ['bash', '-c', '. env.sh']
-        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-
-        if proc.wait() != 0:
-            err_msg = "Failed to source the environment'"
-            raise RuntimeError(err_msg)
-
-        env = os.environ.copy()
-        return env
-
-    @classmethod
-    def discover_exampleservice_vm_instance_on_cord(cls, tenant_name):
-        name=None
-        status=None
-        try:
-            credentials = cls.get_nova_credentials_v2()
-            nvclient = nova_client.Client('2', **credentials)
-            instance_list=nvclient.servers.list()
-            if instance_list > 0:
-
-               for inst in instance_list:
-
-                   instance_id = inst.id
-                   name=inst.name
-                   inst_find=nvclient.servers.find(id=instance_id)
-                   print('   - Instance %s Discovered' % inst.name)
-                   print('   - Instance ID %s Discovered' % instance_id)
-                   print('   - Instance %s Status' % inst.status)
-                   status=inst.status
-        except Exception:
-            print('   - Instance Not Found')
-            status = False
-
-        instance_data = {'instance_name': name,
-                                'status': status }
-        return instance_data
-
-
-    @classmethod
-    def terminate_exampleservice_instance_vm_on_cord(cls, tenant_name, vm_name, network_id):
-        credentials = cls.get_nova_credentials_v2()
-        nvclient = nova_client.Client('2', **credentials)
-        nvclient.quotas.delete(tenant_name)
-        try:
-            instance = nvclient.servers.find(name=vm_name)
-            nvclient.servers.delete(instance.id)
-            print "  * Instance terminated on cord: " + str(network_id)
-        except Exception:
-            print "  * Instance Not Found on cord: " + str(network_id)
-            pass
-        return True
-
-class ExampleSeviceWrapper(object):
-
-    def __init__(self, exampleservice):
-        self.exampleservice = exampleservice
-        self.name = self.exampleservice.name
-        self.compute_node = self.get_compute_node()
-        self.ip = self.get_ip()
-
-    '''
-    @method: get_compute_node
-    @Description:
-    @params:
-    returns compute node name
-    '''
-    def get_compute_node(self):
-        return self.exampleservice._info['OS-EXT-SRV-ATTR:hypervisor_hostname']
-
-    '''
-    @method: get_ip
-    @Description:
-    @params:
-    returns ip of network
-    '''
-    def get_ip(self):
-        if 'management' in self.exampleservice.networks:
-            ips = self.exampleservice.networks['management']
-            if len(ips) > 0:
-                return ips[0]
-        return None
-
-    def get_public_ip(self):
-        if 'public' in self.exampleservice.networks:
-            ips = self.exampleservice.networks['public']
-            if len(ips) > 0:
-                return ips[0]
-        return None
-
-    def get_name(self):
-        return  self.exampleservice.name
-
-    '''
-    @method: run_cmd_compute
-    @Description:
-    @params:
-    returns Status & output
-    '''
-    def run_cmd_compute(self, cmd, timeout = 5):
-        ssh_agent = SSHTestAgent(self.compute_node)
-        st, output = ssh_agent.run_cmd(cmd, timeout = timeout)
-        if st == True and output:
-            output = output.strip()
-        else:
-            output = None
-
-        return st, output
-
-    '''
-    @method: get_health
-    @Description:
-    @params:
-    returns Status
-    '''
-    def get_health(self):
-        if self.ip is None:
-            return True
-        cmd = 'ping -c 1 {}'.format(self.ip)
-        log.info('Pinging ONBOARDED SERVICE %s at IP %s' %(self.name, self.ip))
-        st, _ = self.run_cmd_compute(cmd)
-        log.info('ONBOARDED SERVICE %s at IP %s is %s' %(self.name, self.ip, 'reachable' if st == True else 'unreachable'))
-        return st
-
-    '''
-    @method: check_access
-    @Description: validates access
-    @params:
-    returns Status
-    '''
-    def check_access(self):
-        if self.ip is None:
-           return True
-        ssh_agent = SSHTestAgent(self.compute_node)
-        st, _ = ssh_agent.run_cmd('ls', timeout=10)
-        if st == False:
-            log.error('Compute node at %s is not accessible' %(self.compute_node))
-            return st
-        log.info('Checking if ONBOARDING SERVICE at %s is accessible from compute node %s' %(self.ip, self.compute_node))
-        st, _ = ssh_agent.run_cmd('ssh {} ls'.format(self.ip), timeout=30)
-        if st == True:
-            log.info('OK')
-        return st
-
-    '''
-    @method: Validate services
-    @Description: This validates if expected service is running in example service VM
-    @params:
-    returns Status
-    '''
-    def validate_service_in_vm(self):
-        if self.ip is None:
-           return True
-        ssh_agent = SSHTestAgent(self.compute_node)
-        st, _ = ssh_agent.run_cmd('ls', timeout=10)
-        if st == False:
-            log.error('Compute node at %s is not accessible' %(self.compute_node))
-            return st
-        log.info('Checking if APACHE SERVICE at %s is running %s' %(self.ip, self.compute_node))
-        st, _ = ssh_agent.run_cmd('ssh {} ls /var/run/apache2/apache2.pid'.format(self.ip), timeout=30)
-        if st == True:
-            log.info('OK')
-        return st
-
-    def pause(self):
-	return self.exampleservice.pause()
-
-    def unpause(self):
-        return self.exampleservice.unpause()
-
-    def stop(self):
-        return self.exampleservice.stop()
-
-    def start(self):
-        return self.exampleservice.start()
-
-    def suspend(self):
-        return self.exampleservice.suspend()
-
-    def resume(self):
-        return self.exampleservice.resume()
-
-    def reboot(self):
-        return self.exampleservice.reboot()
-
-
diff --git a/src/test/utils/OnosCtrl.py b/src/test/utils/OnosCtrl.py
deleted file mode 100644
index dc4f05c..0000000
--- a/src/test/utils/OnosCtrl.py
+++ /dev/null
@@ -1,475 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import json
-import requests
-import os,sys,time
-from OltConfig import OltConfig
-from CordTestUtils import get_mac, get_controller, log_test
-from EapolAAA import get_radius_macs, get_radius_networks
-
-class OnosCtrl:
-
-    auth = ('karaf', 'karaf')
-    controller = get_controller()
-    cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
-    maven_repo = 'http://central.maven.org/maven2/org/onosproject'
-    applications_url = 'http://%s:8181/onos/v1/applications' %(controller)
-    host_cfg_url = 'http://%s:8181/onos/v1/network/configuration/hosts/' %(controller)
-
-    def __init__(self, app, controller = None):
-        self.app = app
-        if controller is not None:
-            self.controller = controller
-        self.app_url = 'http://%s:8181/onos/v1/applications/%s' %(self.controller, self.app)
-        self.cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(self.controller)
-        self.auth = ('karaf', 'karaf')
-
-    @classmethod
-    def config(cls, config, controller=None):
-        if config is not None:
-            json_data = json.dumps(config)
-	    if controller is None:
-                resp = requests.post(cls.cfg_url, auth = cls.auth, data = json_data)
-	    else:
-		cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
-	        resp = requests.post(cfg_url, auth = cls.auth, data = json_data)
-            return resp.ok, resp.status_code
-        return False, 400
-
-    @classmethod
-    def get_config(cls, controller=None):
-	if controller is None:
-            controller = cls.controller
-	cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
-	resp = requests.get(cfg_url, auth = cls.auth)
-        if resp.ok:
-            return resp.json()
-        return None
-
-    @classmethod
-    def delete(cls, config, controller=None):
-        if config:
-            json_data = json.dumps(config)
-	    if controller is None:
-	        print('default Onos config url is %s'%cls.cfg_url)
-                resp = requests.delete(cls.cfg_url, auth = cls.auth, data = json_data)
-	    else:
-		cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
-	        resp = requests.delete(cfg_url, auth = cls.auth, data = json_data)
-            return resp.ok, resp.status_code
-        return False, 400
-
-    def activate(self):
-        resp = requests.post(self.app_url + '/active', auth = self.auth)
-        return resp.ok, resp.status_code
-
-    def deactivate(self):
-        resp = requests.delete(self.app_url + '/active', auth = self.auth)
-        return resp.ok, resp.status_code
-
-    @classmethod
-    def get_devices(cls, controller = None, mfr = None):
-        if controller is None:
-            controller = cls.controller
-        url = 'http://%s:8181/onos/v1/devices' %(controller)
-        result = requests.get(url, auth = cls.auth)
-        if result.ok:
-            devices = result.json()['devices']
-            devices = filter(lambda d: d['available'], devices)
-            if mfr:
-                devices = filter(lambda d: d['mfr'].startswith(mfr), devices)
-            return devices
-        return None
-
-    @classmethod
-    def get_links(cls, controller = None):
-        if controller is None:
-            controller = cls.controller
-        url = 'http://%s:8181/onos/v1/links' %(controller)
-        result = requests.get(url, auth = cls.auth)
-        if result.ok:
-            links = result.json()['links']
-            return links
-        return None
-
-    @classmethod
-    def get_device_id(cls, controller = None, mfr = None, olt_conf_file = ''):
-        '''If running under olt, we get the first switch connected to onos'''
-        olt = OltConfig(olt_conf_file = olt_conf_file)
-        did = 'of:' + get_mac()
-        if olt.on_olt():
-            devices = cls.get_devices(controller = controller, mfr = mfr)
-            if devices:
-                dids = map(lambda d: d['id'], devices)
-                if len(dids) == 1:
-                    did = dids[0]
-                else:
-                    ###If we have more than 1, then check for env before using first one
-                    did = os.getenv('OLT_DEVICE_ID', dids[0])
-
-        return did
-
-    @classmethod
-    def get_device_ids(cls, controller = None, olt_conf_file = ''):
-        '''If running under olt, we get the first switch connected to onos'''
-        olt = OltConfig(olt_conf_file = olt_conf_file)
-        did = 'of:' + get_mac()
-        device_ids = []
-        if olt.on_olt():
-            devices = cls.get_devices(controller = controller)
-            if devices:
-                device_ids = map(lambda d: d['id'], devices)
-        else:
-            device_ids.append(did)
-
-        return device_ids
-
-    @classmethod
-    def get_flows(cls, device_id,controller=None):
-        if controller is None:
-	    url = 'http://%s:8181/onos/v1/flows/' %(cls.controller) + device_id
-	else:
-	    url = 'http://%s:8181/onos/v1/flows/' %(controller) + device_id
-        result = requests.get(url, auth = cls.auth)
-        if result.ok:
-            return result.json()['flows']
-        return None
-
-    @classmethod
-    def get_ports_device(cls, device_id, controller = None):
-        if controller is None:
-            url = 'http://{}:8181/onos/v1/devices/{}/ports'.format(cls.controller, device_id)
-        else:
-            url = 'http://{}:8181/onos/v1/devices/{}/ports'.format(controller, device_id)
-
-        result = requests.get(url, auth = cls.auth)
-        if result.ok:
-            return result.json()['ports']
-        return None
-
-    @classmethod
-    def cord_olt_device_map(cls, olt_config, controller = None):
-        olt_device_list = []
-        olt_port_map, _ = olt_config.olt_port_map()
-        switches = olt_port_map['switches']
-        if len(switches) > 1:
-            device_ids = cls.get_device_ids(controller = controller)
-        else:
-            did = cls.get_device_id(controller = controller)
-            if did is None:
-                return olt_device_list
-            uplink_dict = {}
-            uplink_dict['did'] = did
-            uplink_dict['switch'] = switches[0]
-            uplink_dict['uplink'] = str(olt_config.olt_conf['uplink'])
-            uplink_dict['vlan'] = str(olt_config.olt_conf['vlan'])
-            olt_device_list.append(uplink_dict)
-            return olt_device_list
-
-        for did in device_ids:
-            ports = cls.get_ports_device(did, controller = controller)
-            if ports:
-                matched = False
-                for port in ports:
-                    for switch in switches:
-                        if port['annotations']['portName'] == switch:
-                            uplink_dict = {}
-                            uplink = olt_port_map[switch]['uplink']
-                            uplink_dict['did'] = did
-                            uplink_dict['switch'] = switch
-                            uplink_dict['uplink'] = str(uplink)
-                            uplink_dict['vlan'] = str(olt_config.olt_conf['vlan'])
-                            olt_device_list.append(uplink_dict)
-                            matched = True
-                            break
-                    if matched == True:
-                        break
-
-        return olt_device_list
-
-    @classmethod
-    def cord_olt_config(cls, olt_config, controller=None):
-        '''Configures OLT data for existing devices/switches'''
-        did_dict = {}
-        config = { 'devices' : did_dict }
-        olt_device_list = cls.cord_olt_device_map(olt_config, controller = controller)
-        if not olt_device_list:
-            return
-        for olt_map in olt_device_list:
-            access_device_dict = {}
-            device_data = {'uplink': olt_map['uplink'], 'vlan': olt_map['vlan']}
-            access_device_dict['accessDevice'] = device_data
-            did_dict[olt_map['did']] = access_device_dict
-
-        ##configure the device list with access information
-        return cls.config(config, controller=controller)
-
-    @classmethod
-    def install_app(cls, app_file, onos_ip = None):
-        params = {'activate':'true'}
-        headers = {'content-type':'application/octet-stream'}
-        url = cls.applications_url if onos_ip is None else 'http://{0}:8181/onos/v1/applications'.format(onos_ip)
-        with open(app_file, 'rb') as payload:
-            result = requests.post(url, auth = cls.auth,
-                                   params = params, headers = headers,
-                                   data = payload)
-	print('result.ok, result.status_code are %s and %s'%(result.ok, result.status_code))
-        return result.ok, result.status_code
-
-    @classmethod
-    def install_app_from_url(cls, app_name, app_version, app_url = None, onos_ip = None):
-        params = {'activate':'true'}
-        headers = {'content-type':'application/json'}
-        if app_url is None:
-            app_oar_file = '{}-{}.oar'.format(app_name, app_version)
-            app_url = '{0}/{1}/{2}/{3}'.format(cls.maven_repo, app_name, app_version, app_oar_file)
-        params['url'] = app_url
-        url = cls.applications_url if onos_ip is None else 'http://{0}:8181/onos/v1/applications'.format(onos_ip)
-        result = requests.post(url, auth = cls.auth,
-                               json = params, headers = headers)
-        return result.ok, result.status_code
-
-    @classmethod
-    def uninstall_app(cls, app_name, onos_ip = None):
-        params = {'activate':'true'}
-        headers = {'content-type':'application/octet-stream'}
-        url = cls.applications_url if onos_ip is None else 'http://{0}:8181/onos/v1/applications'.format(onos_ip)
-        app_url = '{}/{}'.format(url, app_name)
-        resp = requests.delete(app_url, auth = cls.auth)
-        return resp.ok, resp.status_code
-
-    @classmethod
-    def host_config(cls, config, onos_ip=None):
-        if config:
-           json_data = json.dumps(config)
-           url = cls.host_cfg_url if onos_ip is None else 'http://{}:8181/onos/v1/network/configuration/hosts/'.format(onos_ip)
-           resp = requests.post(url, auth = cls.auth, data = json_data)
-           return resp.ok, resp.status_code
-        return False, 400
-
-    @classmethod
-    def config_device_driver(cls, controller = None, dids = None, driver = 'voltha'):
-        driver_apps = ('org.onosproject.drivers', 'org.onosproject.openflow-base',)
-        if dids is None:
-            dids = cls.get_device_ids(controller = controller)
-        device_map = {}
-        for did in dids:
-            device_map[did] = { 'basic' : { 'driver' : driver } }
-        network_cfg = { 'devices' : device_map }
-        cls.config(network_cfg)
-        for driver in driver_apps:
-            cls(driver).deactivate()
-        time.sleep(2)
-        for driver in driver_apps:
-            cls(driver).activate()
-        time.sleep(5)
-
-    @classmethod
-    def device_id_to_mac(cls, device_id):
-        device_mac_raw = device_id[-12:]
-        hwaddrs = []
-        for i in xrange(0, 12, 2):
-            hwaddrs.append(device_mac_raw[i:i+2])
-
-        device_mac = ':'.join(hwaddrs)
-        return device_mac
-
-    @classmethod
-    def aaa_load_config(cls, controller = None, olt_conf_file = '', conn_type = 'socket'):
-        ovs_devices = cls.get_devices(controller = controller, mfr = 'Nicira')
-        if not ovs_devices and conn_type != 'socket':
-            log_test.info('No OVS devices found to configure AAA connect points')
-            return
-        olt = OltConfig(olt_conf_file = olt_conf_file)
-        port_map, _ = olt.olt_port_map()
-        app = 'org.opencord.aaa'
-        cfg = { 'apps' : { app : { 'AAA' : {} } } }
-        if conn_type == 'socket':
-            customizer = 'default'
-        else:
-            customizer = 'sample'
-        aaa_cfg = dict(radiusConnectionType = conn_type,
-                       radiusSecret = 'radius_password',
-                       radiusServerPort = '1812',
-                       packetCustomizer = customizer,
-                       vlanId = -1)
-        radius_networks = get_radius_networks(len(port_map['switch_radius_port_list']))
-        index = 0
-        for switch, ports in port_map['switch_radius_port_list']:
-            radius_macs = get_radius_macs(len(ports))
-            prefix, _, _ = radius_networks[index]
-            index += 1
-            aaa_cfg['nasIp'] = controller or cls.controller
-            aaa_cfg['nasMac'] = radius_macs[0]
-            aaa_cfg['radiusMac'] = radius_macs[0]
-            connect_points = []
-            radius_port = port_map[ ports[0] ]
-            radius_ip = '{}.{}'.format(prefix, radius_port)
-            if conn_type == 'socket':
-                radius_ip = os.getenv('ONOS_AAA_IP')
-            aaa_cfg['radiusIp'] = radius_ip
-            for dev in ovs_devices:
-                device_id = dev['id']
-                ports = OnosCtrl.get_ports_device(device_id, controller = controller)
-                radius_ports = filter(lambda p: p['isEnabled'] and 'annotations' in p and \
-                                      p['annotations']['portName'].startswith('r'),
-                                      ports)
-                if not radius_ports:
-                    continue
-                radius_port = radius_ports[0]['port']
-                connect_point = '{}/{}'.format(device_id, radius_port)
-                connect_points.append(connect_point)
-            aaa_cfg['radiusServerConnectPoints'] = connect_points
-            break
-
-        cfg['apps'][app]['AAA'] = aaa_cfg
-        cls.config(cfg, controller = controller)
-
-    @classmethod
-    def get_ovs_switch_map(cls, controller = None, olt_conf_file = ''):
-        port_map = None
-        #build ovs switch map
-        if olt_conf_file:
-            olt = OltConfig(olt_conf_file = olt_conf_file)
-            port_map, _ = olt.olt_port_map()
-
-        devices = cls.get_devices(controller = controller, mfr = 'Nicira')
-        switch_map = {}
-        for dev in devices:
-            device_id = dev['id']
-            serial = dev['serial']
-            ports = cls.get_ports_device(dev['id'], controller = controller)
-            ports = filter(lambda p: p['isEnabled'] and 'annotations' in p, ports)
-            #just create dummy ctag/uni port numbers
-            onu_ports = [1] * len(ports)
-            onu_names = map(lambda p: p['annotations']['portName'], ports)
-            onu_macs = map(lambda p: p['annotations']['portMac'], ports)
-            switch_map[device_id] = dict(uplink_vlan = 1,
-                                         serial = serial,
-                                         ports = onu_ports,
-                                         names = onu_names,
-                                         macs = onu_macs)
-        return switch_map
-
-    @classmethod
-    def sadis_load_config(cls, controller = None, olt_switch_map = {}, olt_conf_file = '', tagged_traffic = False):
-        sadis_app = 'org.opencord.sadis'
-        aaa_app = 'org.opencord.aaa'
-        sadis_cfg = {
-            'apps' : {
-                sadis_app : {
-                    'sadis' : {
-                        'integration' : {
-                            'cache' : {
-                                'enabled' : False,
-                                'maxsize' : 50,
-                                'ttl' : 'PT0m',
-                            },
-                        },
-                        'entries' : [],
-                    },
-                },
-            }
-        }
-        sadis_entries = sadis_cfg['apps'][sadis_app]['sadis']['entries']
-        nasId = '1/1/2'
-        nasPortId = '1/1/2'
-        switch_map = olt_switch_map.copy()
-        ovs_switch_map = cls.get_ovs_switch_map(controller = controller,
-                                                olt_conf_file = olt_conf_file)
-        #log_test.info('OVS switch map: %s' %ovs_switch_map)
-        switch_map.update(ovs_switch_map)
-        for device, entries in switch_map.iteritems():
-            uni_ports = entries['ports']
-            uni_port_names = entries['names']
-            uni_port_macs = entries['macs']
-            s_tag = entries['uplink_vlan']
-            serial = entries['serial']
-            #add entries for uni ports and device
-            for p in xrange(len(uni_ports)):
-                sadis_entry = dict(nasId = nasId, nasPortId = nasPortId, slot = 1)
-                sadis_entry['id'] = uni_port_names[p]
-                sadis_entry['hardwareIdentifier'] = uni_port_macs[p]
-                sadis_entry['cTag'] = uni_ports[p] if tagged_traffic else -1
-                sadis_entry['sTag'] = s_tag if tagged_traffic else -1
-                sadis_entry['port'] = uni_ports[p]
-                sadis_entry['ipAddress'] = controller or cls.controller
-                sadis_entries.append(sadis_entry)
-                #add entry for the device itself
-                sadis_entry = dict(nasId = nasId, nasPortId = nasPortId, slot = 1)
-                sadis_entry['id']  = serial
-                sadis_entry['hardwareIdentifier'] = cls.device_id_to_mac(device)
-                sadis_entry['cTag'] = uni_ports[p] if tagged_traffic else -1
-                sadis_entry['sTag'] = s_tag if tagged_traffic else -1
-                sadis_entry['port'] = uni_ports[p]
-                sadis_entry['ipAddress'] = controller or cls.controller
-                sadis_entries.append(sadis_entry)
-
-        #log_test.info('Sadis cfg: %s' %json.dumps(sadis_cfg, indent=4))
-        cls.config(sadis_cfg, controller = controller)
-
-    @classmethod
-    def config_olt_access(cls, uplink_vlan, controller = None, defaultVlan = '0', olt_conf_file = ''):
-        olt = OltConfig(olt_conf_file = olt_conf_file)
-        port_map, _ = olt.olt_port_map()
-        uplink = str(port_map['uplink'])
-        device_config = { 'devices' : {} }
-        ovs_devices = cls.get_devices(controller = controller, mfr = 'Nicira')
-        for dev in ovs_devices:
-            device_id = dev['id']
-            device_config['devices'][device_id] = {}
-            device_config['devices'][device_id]['basic'] = dict(driver = 'default')
-            device_config['devices'][device_id]['accessDevice'] = dict(uplink = uplink,
-                                                                       vlan = uplink_vlan,
-                                                                       defaultVlan = defaultVlan)
-
-        cls.config(device_config, controller = controller)
-
-    @classmethod
-    def config_olt_component(cls, controller = None, enableDhcpIgmpOnProvisioning = True, defaultVlan = 0):
-        if controller is None:
-            controller = cls.controller
-        olt_property_url = 'configuration/org.opencord.olt.impl.Olt'
-        property_url = 'http://{}:8181/onos/v1/{}'.format(controller, olt_property_url)
-        cfg = dict(enableDhcpIgmpOnProvisioning = enableDhcpIgmpOnProvisioning, defaultVlan = defaultVlan)
-        resp = requests.post(property_url, auth = cls.auth, data = json.dumps(cfg))
-        return resp.ok, resp.status_code
-
-    @classmethod
-    def config_extraneous_flows(cls, controller = None, enable = True):
-        if controller is None:
-            controller = cls.controller
-        flow_property_url = 'configuration/org.onosproject.net.flow.impl.FlowRuleManager'
-        property_url = 'http://{}:8181/onos/v1/{}'.format(controller, flow_property_url)
-        cfg = dict(allowExtraneousRules = enable)
-        resp = requests.post(property_url, auth = cls.auth, data = json.dumps(cfg))
-        return resp.ok, resp.status_code
diff --git a/src/test/utils/OnosFlowCtrl.py b/src/test/utils/OnosFlowCtrl.py
deleted file mode 100644
index b1193f5..0000000
--- a/src/test/utils/OnosFlowCtrl.py
+++ /dev/null
@@ -1,324 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import json
-import requests
-import os,sys,time
-from nose.tools import *
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from OnosCtrl import OnosCtrl, get_controller
-from CordTestUtils import log_test
-
-class OnosFlowCtrl:
-
-    auth = ('karaf', 'karaf')
-    controller = get_controller()
-    cfg_url = 'http://%s:8181/onos/v1/flows/' %(controller)
-
-    def __init__( self,
-                  deviceId,
-                  appId=0,
-                  ingressPort="",
-                  egressPort="",
-                  ethType="",
-                  ethSrc="",
-                  ethDst="",
-                  vlan="",
-                  ipProto="",
-                  ipSrc=(),
-                  ipDst=(),
-                  tcpSrc="",
-                  tcpDst="",
-                  udpDst="",
-                  udpSrc="",
-                  mpls="",
-		  dscp="",
-		  icmpv4_type="",
-		  icmpv4_code="",
-		  icmpv6_type="",
-		  icmpv6_code="",
-		  ipv6flow_label="",
-		  ecn="",
-		  ipv6_target="",
-		  ipv6_sll="",
-		  ipv6_tll="",
-		  ipv6_extension="",
-		  controller=None):
-        self.deviceId = deviceId
-        self.appId = appId
-        self.ingressPort = ingressPort
-        self.egressPort = egressPort
-        self.ethType = ethType
-        self.ethSrc = ethSrc
-        self.ethDst = ethDst
-        self.vlan = vlan
-        self.ipProto = ipProto
-        self.ipSrc = ipSrc
-        self.ipDst = ipDst
-        self.tcpSrc = tcpSrc
-        self.tcpDst = tcpDst
-        self.udpDst = udpDst
-        self.udpSrc = udpSrc
-        self.mpls = mpls
-        self.dscp = dscp
-	self.icmpv4_type = icmpv4_type
-	self.icmpv4_code = icmpv4_code
-	self.icmpv6_type = icmpv6_type
-	self.icmpv6_code = icmpv6_code
-	self.ipv6flow_label = ipv6flow_label
-	self.ecn = ecn
-	self.ipv6_target = ipv6_target
-	self.ipv6_sll = ipv6_sll
-	self.ipv6_tll = ipv6_tll
-	self.ipv6_extension = ipv6_extension
-	if controller is not None:
-		self.controller=controller
-		self.cfg_url = 'http://%s:8181/onos/v1/flows/' %(self.controller)
-
-    @classmethod
-    def get_flows(cls, device_id,controller=None):
-        return OnosCtrl.get_flows(device_id,controller=controller)
-
-    def addFlow(self):
-        """
-        Description:
-            Creates a single flow in the specified device
-        Required:
-            * deviceId: id of the device
-        Optional:
-            * ingressPort: port ingress device
-            * egressPort: port  of egress device
-            * ethType: specify ethType
-            * ethSrc: specify ethSrc ( i.e. src mac addr )
-            * ethDst: specify ethDst ( i.e. dst mac addr )
-            * ipProto: specify ip protocol
-            * ipSrc: specify ip source address with mask eg. ip#/24
-                as a tuple (type, ip#)
-            * ipDst: specify ip destination address eg. ip#/24
-                as a tuple (type, ip#)
-            * tcpSrc: specify tcp source port
-            * tcpDst: specify tcp destination port
-        Returns:
-            True for successful requests;
-            False for failure/error on requests
-        """
-        flowJson = { "priority":100,
-                     "isPermanent":"true",
-                     "timeout":0,
-                     "deviceId":self.deviceId,
-                     "treatment":{"instructions":[]},
-                     "selector": {"criteria":[]}}
-        if self.appId:
-            flowJson[ "appId" ] = self.appId
-
-        if self.egressPort:
-            flowJson[ 'treatment' ][ 'instructions' ].append( {
-                    "type":"OUTPUT",
-                    "port":self.egressPort } )
-        if self.ingressPort:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"IN_PORT",
-                    "port":self.ingressPort } )
-        if self.ethType:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"ETH_TYPE",
-                    "ethType":self.ethType } )
-        if self.ethSrc:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"ETH_SRC",
-                    "mac":self.ethSrc } )
-        if self.ethDst:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"ETH_DST",
-                    "mac":self.ethDst } )
-        if self.vlan:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"VLAN_VID",
-                    "vlanId":self.vlan } )
-        if self.mpls:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"MPLS_LABEL",
-                    "label":self.mpls } )
-        if self.ipSrc:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":self.ipSrc[0],
-                    "ip":self.ipSrc[1] } )
-        if self.ipDst:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":self.ipDst[0],
-                    "ip":self.ipDst[1] } )
-        if self.tcpSrc:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"TCP_SRC",
-                    "tcpPort": self.tcpSrc } )
-        if self.tcpDst:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"TCP_DST",
-                    "tcpPort": self.tcpDst } )
-        if self.udpSrc:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"UDP_SRC",
-                    "udpPort": self.udpSrc } )
-        if self.udpDst:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"UDP_DST",
-                    "udpPort": self.udpDst } )
-        if self.ipProto:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"IP_PROTO",
-                    "protocol": self.ipProto } )
-        if self.dscp:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"IP_DSCP",
-                    "ipDscp": self.dscp } )
-
-        if self.icmpv4_type:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'ICMPV4_TYPE',
-                    "icmpType":self.icmpv4_type } )
-
-        if self.icmpv6_type:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'ICMPV6_TYPE',
-                    "icmpv6Type":self.icmpv6_type } )
-
-        if self.icmpv4_code:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'ICMPV4_CODE',
-                    "icmpCode": self.icmpv4_code } )
-
-        if self.icmpv6_code:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'ICMPV6_CODE',
-                    "icmpv6Code": self.icmpv6_code } )
-
-        if self.ipv6flow_label:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'IPV6_FLABEL',
-                    "flowLabel": self.ipv6flow_label } )
-
-        if self.ecn:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":"IP_ECN",
-                    "ipEcn": self.ecn } )
-
-        if self.ipv6_target:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'IPV6_ND_TARGET',
-                    "targetAddress": self.ipv6_target } )
-
-        if self.ipv6_sll:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'IPV6_ND_SLL',
-                    "mac": self.ipv6_sll } )
-
-        if self.ipv6_tll:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'IPV6_ND_TLL',
-                    "mac": self.ipv6_tll } )
-
-
-        if self.ipv6_extension:
-            flowJson[ 'selector' ][ 'criteria' ].append( {
-                    "type":'IPV6_EXTHDR',
-                    "exthdrFlags": self.ipv6_extension } )
-
-
-
-
-        return self.sendFlow( deviceId=self.deviceId, flowJson=flowJson)
-
-    def removeFlow(self, deviceId, flowId):
-        """
-        Description:
-            Remove specific device flow
-        Required:
-            str deviceId - id of the device
-            str flowId - id of the flow
-        Return:
-            Returns True if successfully deletes flows, otherwise False
-        """
-        # NOTE: REST url requires the intent id to be in decimal form
-        query = self.cfg_url + str( deviceId ) + '/' + str( int( flowId ) )
-        response = requests.delete(query, auth = self.auth)
-        if response:
-            if 200 <= response.status_code <= 299:
-                return True
-            else:
-                return False
-
-        return True
-
-    def findFlow(self, deviceId, **criterias):
-        flows = self.get_flows(deviceId,controller=self.controller)
-        match_keys = criterias.keys()
-        matches = len(match_keys)
-        num_matched = 0
-        for f in flows:
-            criteria = f['selector']['criteria']
-            for c in criteria:
-                if c['type'] not in match_keys:
-                    continue
-                match_key, match_val = criterias.get(c['type'])
-                val = c[match_key]
-                if val == match_val:
-                    num_matched += 1
-                if num_matched == matches:
-                    return f['id']
-        return None
-
-    def sendFlow(self, deviceId, flowJson):
-        """
-        Description:
-            Sends a single flow to the specified device. This function exists
-            so you can bypass the addFLow driver and send your own custom flow.
-        Required:
-            * The flow in json
-            * the device id to add the flow to
-        Returns:
-            True for successful requests
-            False for error on requests;
-        """
-        url = self.cfg_url + str(deviceId)
-        response = requests.post(url, auth = self.auth, data = json.dumps(flowJson) )
-        if response.ok:
-            if response.status_code in [200, 201]:
-                log_test.info('Successfully POSTED flow for device %s' %str(deviceId))
-                return True
-            else:
-                log_test.info('Post flow for device %s failed with status %d' %(str(deviceId),
-                                                                           response.status_code))
-                return False
-        else:
-            log_test.error('Flow post request returned with status %d' %response.status_code)
-
-        return False
diff --git a/src/test/utils/OnosLog.py b/src/test/utils/OnosLog.py
deleted file mode 100644
index 5ce6838..0000000
--- a/src/test/utils/OnosLog.py
+++ /dev/null
@@ -1,109 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import re
-from SSHTestAgent import SSHTestAgent
-
-class OnosLog(object):
-    CLI_USER = 'karaf'
-    CLI_PASSWD = 'karaf'
-    CLI_PORT = 8101
-    KARAF_VERSION = os.getenv('KARAF_VERSION', '3.0.8')
-    HOST = os.getenv('ONOS_CONTROLLER_IP', '172.17.0.2').split(',')[0]
-    last_snapshot_map = {}
-
-    def __init__(self, host = HOST, log_file = None):
-        if log_file is None:
-            log_file = '/root/onos/apache-karaf-{}/data/log/karaf.log'.format(self.KARAF_VERSION)
-        self.log_file = log_file
-        self.ssh_agent = SSHTestAgent(host = host, user = self.CLI_USER,
-                                      password = self.CLI_PASSWD, port = self.CLI_PORT)
-        if not OnosLog.last_snapshot_map.has_key(host):
-            OnosLog.last_snapshot_map[host] = []
-
-    @classmethod
-    def get_last_snapshot(cls, host):
-        if cls.last_snapshot_map.has_key(host):
-            return cls.last_snapshot_map[host]
-        return []
-
-    @classmethod
-    def update_last_snapshot(cls, host, res):
-        cls.last_snapshot_map[host] = res
-
-    def get_log(self, search_terms = None, exception = True, cache_result = True):
-        """Run the command on the test host"""
-        cmd = 'cat {}'.format(self.log_file)
-        st, output = self.ssh_agent.run_cmd(cmd)
-        if st is False:
-            return st, output
-        exception_map = {'Exception' : [] }
-        last_snapshot = self.get_last_snapshot(self.ssh_agent.host)
-        lines = output.splitlines()
-        if search_terms:
-            if type(search_terms) is str:
-                terms = [ search_terms ]
-            else:
-                terms = list(search_terms)
-            if exception is True and 'Exception' not in terms:
-                terms.append('Exception')
-            match_lines = []
-            last_len = len(last_snapshot)
-            for i in xrange(0, len(lines)):
-                if i < last_len and lines[i] in last_snapshot:
-                    ##skip lines matching the last snapshot
-                    continue
-                for t in terms:
-                    if lines[i].find(t) >= 0:
-                        match_lines.append(lines[i])
-                        if t == 'Exception':
-                            exception_map[t] = lines[i+1:i+1+10]
-            output = '\n'.join(match_lines)
-            if len(exception_map['Exception']) > 0:
-                output += '\nException:\n'
-                output += '\n'.join(exception_map['Exception'])
-
-        #update the last snapshot
-        if cache_result is True:
-            self.update_last_snapshot(self.ssh_agent.host, lines)
-        return st, output
-
-    def search_log_pattern(self, pattern):
-        r_pat = re.compile(pattern)
-        cmd = 'cat {}'.format(self.log_file)
-        st, output = self.ssh_agent.run_cmd(cmd)
-        if st is False:
-            return None
-        return r_pat.findall(output)
-
-if __name__ == '__main__':
-    onos = os.getenv('ONOS_CONTROLLER_IP', '172.17.0.2')
-    onos_log = OnosLog(host = onos)
-    print('Checking for INFO')
-    st, output = onos_log.get_log('INFO')
-    print(st, output)
-    print('\n\nChecking for ERROR\n\n')
-    st, output = onos_log.get_log('ERROR')
-    print(st, output)
-    print('Checking for ERROR and INFO')
-    st, output = onos_log.get_log(('ERROR', 'INFO'))
-    print(st, output)
-    pat = onos_log.search_log_pattern('ApplicationManager .* Started')
-    if pat:
-        print(pat)
-    else:
-        print('Onos did not start')
diff --git a/src/test/utils/Perf.py b/src/test/utils/Perf.py
deleted file mode 100644
index a39b703..0000000
--- a/src/test/utils/Perf.py
+++ /dev/null
@@ -1,89 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import subprocess
-import requests
-import json
-import time
-from CordTestUtils import log_test as log, getstatusoutput, get_controller
-from OnosCtrl import OnosCtrl
-
-class perf(object):
-    def  __init__(self, controller, interface = 'eth0'):
-         self.controller = controller
-         self.interface = interface
-
-    def  know_cpu_freq(self):
-         freq = open('/proc/cpuinfo/','r')
-         freqs = freq.read()
-         freq.seek(0)
-         cpuentry = freq.readline()
-         cpusplit = cpuentry.split()
-
-         while cpusplit[0] != "cpu":
-              while cpusplit[0] != "MHz":
-                    cpuline = freq.readline()
-                    cpusplit = cpuline.split()
-         freq.close()
-         cpu_mhz = cpusplit[3]
-         return cpu_mhz
-
-    def  retrieve_cpu_stats(self):
-         cpu = open('/proc/stat/','r').readlines()[0]
-         return map(float, cpu.split()[1:5])
-
-    def  validate_cpu_performance(self, interval):
-         time_stamp1 = retrieve_cpu_stats()
-         time.sleep(interval)
-         time_stamp2 = retrieve_cpu_stats()
-         diff = [time_stamp2[i] - time_stamp1[i] for i in range(len(time_stamp1))]
-         try:
-             return 1.0 - (diff[-1:].pop()/(sum(diff)*1.0))
-         except:
-             return 0.0
-
-    def  memory_usage(self):
-         cmd_run = subprocess.check_output(['free','-b'])
-         memory = cmd_run.split()
-         total = int(memory[7])
-         used = int(memory[8])
-         free = int(memory[9])
-         return total, used, free
-
-    def  rx_network_stats(self, intf):
-         for entry in open('/proc/net/dev', 'r'):
-             if intf in entry:
-                stat = entry.split('%s:' % intf)[1].split()
-                rx_bytes = stat[0]
-                rx_packets = stat[1]
-                rx_errors = stat[2]
-                rx_drops = stat[3]
-         return int(rx_bytes), int(rx_packets), int(rx_errors), int(rx_drops)
-
-    def  tx_network_stats(self, intf):
-         for entry in open('/proc/net/dev', 'r'):
-             if intf in entry:
-                stat = entry.split('%s:' % intf)[1].split()
-                tx_bytes = stat[8]
-                tx_packets = stat[9]
-                tx_errors = stat[10]
-                tx_drops = stat[11]
-         return int(tx_bytes), int(tx_packets), int(tx_errors), int(tx_drops)
-
-    def  check_node_uptime(self):
-         return float(open('/proc/uptime','r').read().split(' ')[0])
-
diff --git a/src/test/utils/SSHTestAgent.py b/src/test/utils/SSHTestAgent.py
deleted file mode 100644
index 14bebd5..0000000
--- a/src/test/utils/SSHTestAgent.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os, sys, time
-from paramiko import SSHClient, WarningPolicy, AutoAddPolicy
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-from CordTestUtils import log_test
-
-class SSHTestAgent(object):
-    key_file = os.getenv('SSH_KEY_FILE', None)
-    host = os.getenv('CORD_TEST_HOST', '172.17.0.1')
-    hosts_file = os.path.join(os.getenv('HOME'), '.ssh', 'known_hosts')
-    user = 'ubuntu'
-    password = None
-
-    def __init__(self, host = host, user = user, password = password, port = 22):
-        self.host = host
-        self.user = user
-        self.password = password
-        self.port = port
-        self.client = SSHClient()
-        self.client.set_missing_host_key_policy(AutoAddPolicy())
-
-    def run_cmd(self, cmd, timeout = 5):
-        """Run the command on the test host"""
-        host_remove = 'ssh-keygen -f "%s" -R [%s]:8101 2>/dev/null' %(self.hosts_file, self.host)
-        try:
-            os.system(host_remove)
-        except: pass
-
-        try:
-            self.client.connect(self.host, username = self.user, password = self.password,
-                                key_filename = self.key_file, timeout=timeout, port = self.port)
-        except:
-            log_test.error('Unable to connect to test host %s' %self.host)
-            return False, None
-
-        channel = self.client.get_transport().open_session()
-        channel.exec_command(cmd)
-        status_ready = False
-        if channel.exit_status_ready():
-            status = channel.recv_exit_status()
-            status_ready = True
-        else:
-            status = 0
-        output = None
-        st = status == 0
-        if st:
-            output = ''
-            while True:
-                data = channel.recv(4096)
-                if data:
-                    output += data
-                else:
-                    break
-        if status_ready is False:
-            status = channel.recv_exit_status()
-            st = status == 0
-        time.sleep(0.1)
-        channel.close()
-        self.client.close()
-        return st, output
-
-if __name__ == '__main__':
-    agent = SSHTestAgent(user = 'ubuntu', password = 'ubuntu')
-    cmds = ('docker images', 'docker ps')
-    for cmd in cmds:
-        st, output = agent.run_cmd(cmd)
-        print('Command \"%s\" returned with status: %s' %(cmd, st))
-        if st:
-            print('%s\n' %output)
diff --git a/src/test/utils/Scale.py b/src/test/utils/Scale.py
deleted file mode 100644
index 6c14872..0000000
--- a/src/test/utils/Scale.py
+++ /dev/null
@@ -1,1079 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-from nose.tools import *
-from scapy.all import *
-import requests
-from twisted.internet import defer
-from nose.twistedtools import reactor, deferred
-from CordTestUtils import *
-from CordTestUtils import log_test as log
-from OltConfig import OltConfig
-from onosclidriver import OnosCliDriver
-from SSHTestAgent import SSHTestAgent
-from Channels import Channels, IgmpChannel
-from IGMP import *
-import time, monotonic
-from CordLogger import CordLogger
-from VSGAccess import VSGAccess
-#imports for cord-subscriber module
-from subscriberDb import SubscriberDB
-from Stats import Stats
-from threadPool import ThreadPool
-import threading
-from EapTLS import TLSAuthTest
-from CordTestUtils import log_test as log
-from CordTestConfig import setup_module, running_on_ciab
-from OnosCtrl import OnosCtrl
-from CordContainer import Onos
-from CordSubscriberUtils import CordSubscriberUtils, XosUtils
-from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_shell, cord_test_radius_restart
-
-
-log.setLevel('INFO')
-
-class Subscriber(Channels):
-      log.info('in Subscriber class 0000000')
-      PORT_TX_DEFAULT = 2
-      PORT_RX_DEFAULT = 1
-      INTF_TX_DEFAULT = 'veth2'
-      INTF_RX_DEFAULT = 'veth0'
-      STATS_RX = 0
-      STATS_TX = 1
-      STATS_JOIN = 2
-      STATS_LEAVE = 3
-      SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
-
-      def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
-                   num = 1, channel_start = 0,
-                   tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
-                   iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
-                   mcast_cb = None, loginType = 'wireless'):
-            self.tx_port = tx_port
-            self.rx_port = rx_port
-            self.port_map = port_map or g_subscriber_port_map
-            try:
-                  self.tx_intf = self.port_map[tx_port]
-                  self.rx_intf = self.port_map[rx_port]
-            except:
-                  self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
-                  self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
-
-            log_test.info('Subscriber %s, rx interface %s, uplink interface %s' %(name, self.rx_intf, self.tx_intf))
-            Channels.__init__(self, num, channel_start = channel_start,
-                              iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
-            self.name = name
-            self.service = service
-            self.service_map = {}
-            services = self.service.strip().split(' ')
-            for s in services:
-                  self.service_map[s] = True
-            self.loginType = loginType
-            ##start streaming channels
-            self.join_map = {}
-            ##accumulated join recv stats
-            self.join_rx_stats = Stats()
-            self.recv_timeout = False
-      def has_service(self, service):
-            if self.service_map.has_key(service):
-                  return self.service_map[service]
-            if self.service_map.has_key(service.upper()):
-                  return self.service_map[service.upper()]
-            return False
-
-      def channel_join_update(self, chan, join_time):
-            self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
-            self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
-      def channel_join(self, chan = 0, delay = 2):
-            '''Join a channel and create a send/recv stats map'''
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.delay = delay
-            chan, join_time = self.join(chan)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_join_next(self, delay = 2, leave_flag = True):
-            '''Joins the next channel leaving the last channel'''
-            if self.last_chan:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.join_next(leave_flag = leave_flag)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_jump(self, delay = 2):
-            '''Jumps randomly to the next channel leaving the last channel'''
-            if self.last_chan is not None:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.jump()
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_leave(self, chan = 0, force = False):
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.leave(chan, force = force)
-
-      def channel_update(self, chan, stats_type, packets, t=0):
-            if type(chan) == type(0):
-                  chan_list = (chan,)
-            else:
-                  chan_list = chan
-            for c in chan_list:
-                  if self.join_map.has_key(c):
-                        self.join_map[c][stats_type].update(packets = packets, t = t)
-      def channel_receive(self, chan, cb = None, count = 1, timeout = 5):
-            log_test.info('Subscriber %s on port %s receiving from group %s, channel %d' %
-                     (self.name, self.rx_intf, self.gaddr(chan), chan))
-            r = self.recv(chan, cb = cb, count = count, timeout = timeout)
-            if len(r) == 0:
-                  log_test.info('Subscriber %s on port %s timed out' %(self.name, self.rx_intf))
-            else:
-                  log_test.info('Subscriber %s on port %s received %d packets' %(self.name, self.rx_intf, len(r)))
-            if self.recv_timeout:
-                  ##Negative test case is disabled for now
-                  assert_equal(len(r), 0)
-
-      def recv_channel_cb(self, pkt):
-            ##First verify that we have received the packet for the joined instance
-            log_test.info('Packet received for group %s, subscriber %s, port %s' %
-                     (pkt[IP].dst, self.name, self.rx_intf))
-            if self.recv_timeout:
-                  return
-            chan = self.caddr(pkt[IP].dst)
-            assert_equal(chan in self.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.join_map[chan][self.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.channel_update(chan, self.STATS_RX, 1, t = delta)
-            log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-
-class subscriber_pool:
-
-      def __init__(self, subscriber, test_cbs):
-            self.subscriber = subscriber
-            self.test_cbs = test_cbs
-
-      def pool_cb(self):
-            for cb in self.test_cbs:
-                  if cb:
-                        self.test_status = cb(self.subscriber)
-                        if self.test_status is not True:
-                           ## This is chaning for other sub status has to check again
-                           self.test_status = True
-                           log_test.info('This service is failed and other services will not run for this subscriber')
-                           break
-            log_test.info('This Subscriber is tested for multiple service eligibility ')
-            self.test_status = True
-
-class scale(object):
-
-    USER = "vagrant"
-    PASS = "vagrant"
-    head_node = os.getenv('HEAD_NODE', 'prod')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    MAX_PORTS = 100
-    device_id = 'of:' + get_mac()
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    olt = OltConfig(olt_conf_file = olt_conf_file)
-    APP_NAME = 'org.ciena.xconnect'
-    olt_apps = ()
-    table_app = 'org.ciena.cordigmp'
-    table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
-    app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
-    cpqd_path = os.path.join(test_path, '..', 'setup')
-    ovs_path = cpqd_path
-    test_services = ('IGMP', 'TRAFFIC')
-    num_joins = 0
-    num_subscribers = 0
-    leave_flag = True
-    recv_timeout = False
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-    PORT_TX_DEFAULT = 2
-    PORT_RX_DEFAULT = 1
-    IP_DST = '224.0.0.22'
-    IGMP_DST_MAC = "01:00:5e:00:00:16"
-    igmp_eth = Ether(dst = IGMP_DST_MAC, type = ETH_P_IP)
-    igmp_ip = IP(dst = IP_DST)
-	INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    ingress_iface = 1
-    egress_iface = 2
-    MAX_PORTS = 100
-    CURRENT_PORT_NUM = egress_iface
-    ACL_SRC_IP = '192.168.20.3/32'
-    ACL_DST_IP = '192.168.30.2/32'
-    ACL_SRC_IP_RULE_2 = '192.168.40.3/32'
-    ACL_DST_IP_RULE_2 = '192.168.50.2/32'
-    ACL_SRC_IP_PREFIX_24 = '192.168.20.3/24'
-    ACL_DST_IP_PREFIX_24 = '192.168.30.2/24'
-    HOST_DST_IP = '192.168.30.0/24'
-    HOST_DST_IP_RULE_2 = '192.168.50.0/24'
-
-
-
-
-    CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-
-############ IGMP utility functions #######################
-    def onos_ssm_table_load(self, groups, src_list = ['1.2.3.4'],flag = False):
-          ssm_dict = {'apps' : { 'org.opencord.igmp' : { 'ssmTranslate' : [] } } }
-          ssm_xlate_list = ssm_dict['apps']['org.opencord.igmp']['ssmTranslate']
-          if flag: #to maintain seperate group-source pair.
-              for i in range(len(groups)):
-                  d = {}
-                  d['source'] = src_list[i] or '0.0.0.0'
-                  d['group'] = groups[i]
-                  ssm_xlate_list.append(d)
-          else:
-              for g in groups:
-                  for s in src_list:
-                      d = {}
-                      d['source'] = s or '0.0.0.0'
-                      d['group'] = g
-                      ssm_xlate_list.append(d)
-          self.onos_load_config(ssm_dict)
-          cord_port_map = {}
-          for g in groups:
-                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
-          IgmpChannel().cord_port_table_load(cord_port_map)
-          time.sleep(2)
-
-    def generate_random_multicast_ip_addresses(self,count=500):
-        multicast_ips = []
-        while(count >= 1):
-                ip = '.'.join([str(random.randint(224,239)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
-                if ip in multicast_ips:
-                    pass
-                else:
-                    multicast_ips.append(ip)
-                    count -= 1
-        return multicast_ips
-
-    def generate_random_unicast_ip_addresses(self,count=1):
-        unicast_ips = []
-        while(count >= 1):
-                ip = '.'.join([str(random.randint(11,126)),str(random.randint(1,254)),str(random.randint(1,254)),str(random.randint(1,254))])
-                if ip in unicast_ips:
-                    pass
-                else:
-                    unicast_ips.append(ip)
-                    count -= 1
-        return unicast_ips
-
-    def iptomac(self, mcast_ip):
-        mcast_mac =  '01:00:5e:'
-        octets = mcast_ip.split('.')
-        second_oct = int(octets[1]) & 127
-        third_oct = int(octets[2])
-        fourth_oct = int(octets[3])
-        mcast_mac = mcast_mac + format(second_oct,'02x') + ':' + format(third_oct, '02x') + ':' + format(fourth_oct, '02x')
-        return mcast_mac
-
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], record_type=IGMP_V3_GR_TYPE_INCLUDE,
-                       ip_pkt = None, iface = 'veth0', ssm_load = False, delay = 1):
-        if ssm_load is True:
-              self.onos_ssm_table_load(groups, src_list)
-        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr=self.IP_DST)
-        for g in groups:
-              gr = IGMPv3gr(rtype= record_type, mcaddr=g)
-              gr.sources = src_list
-              igmp.grps.append(gr)
-        if ip_pkt is None:
-              ip_pkt = self.igmp_eth/self.igmp_ip
-        pkt = ip_pkt/igmp
-        IGMPv3.fixup(pkt)
-        log.info('sending igmp join packet %s'%pkt.show())
-        sendp(pkt, iface=iface)
-        time.sleep(delay)
-
-    def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
-        dst_mac = self.iptomac(group)
-        eth = Ether(dst= dst_mac)
-        ip = IP(dst=group,src=source)
-        data = repr(monotonic.monotonic())
-        sendp(eth/ip/data,count=20, iface = intf)
-
-    def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
-        log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
-        self.success = False
-        def recv_task():
-            def igmp_recv_cb(pkt):
-                #log_test.info('received multicast data packet is %s'%pkt.show())
-                log_test.info('multicast data received for group %s from source %s'%(group,source))
-                self.success = True
-            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface='veth0')
-        t = threading.Thread(target = recv_task)
-        t.start()
-        self.send_multicast_data_traffic(group,source=source)
-        t.join()
-        return self.success
-##################### acl utility functions ###############################
-
-    @classmethod
-    def acl_hosts_add(cls, dstHostIpMac, egress_iface_count = 1,  egress_iface_num = None):
-        index = 0
-        if egress_iface_num is None:
-            egress_iface_num = cls.egress_iface
-        for ip,_ in dstHostIpMac:
-            egress = cls.port_map[egress_iface_num]
-            log.info('Assigning ip %s to interface %s' %(ip, egress))
-            config_cmds_egress = ( 'ifconfig {} 0'.format(egress),
-                                   'ifconfig {0} up'.format(egress),
-                                   'ifconfig {0} {1}'.format(egress, ip),
-                                   'arping -I {0} {1} -c 2'.format(egress, ip.split('/')[0]),
-                                   'ifconfig {0}'.format(egress),
-                                 )
-            for cmd in config_cmds_egress:
-                os.system(cmd)
-            index += 1
-            if index == egress_iface_count:
-               break
-            egress_iface_count += 1
-            egress_iface_num += 1
-    @classmethod
-    def acl_hosts_remove(cls, egress_iface_count = 1,  egress_iface_num = None):
-        if egress_iface_num is None:
-           egress_iface_num = cls.egress_iface
-        n = 0
-        for n in range(egress_iface_count):
-           egress = cls.port_map[egress_iface_num]
-           config_cmds_egress = ('ifconfig {} 0'.format(egress))
-           os.system(config_cmds_egress)
-           egress_iface_num += 1
-    def acl_rule_traffic_send_recv(self, srcMac, dstMac, srcIp, dstIp, ingress =None, egress=None, ip_proto=None, dstPortNum = None, positive_test = True):
-        if ingress is None:
-           ingress = self.ingress_iface
-        if egress is None:
-           egress = self.egress_iface
-        ingress = self.port_map[ingress]
-        egress = self.port_map[egress]
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dstIp.split('/')[0] and p[IP].src == srcIp.split('/')[0],
-                  prn = recv_cb, iface = egress)
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = srcMac, dst = dstMac)
-        L3 = IP(src = srcIp.split('/')[0], dst = dstIp.split('/')[0])
-        pkt = L2/L3
-		        log.info('Sending a packet with dst ip %s, src ip %s , dst mac %s src mac %s on port %s to verify if flows are correct' %
-                 (dstIp.split('/')[0], srcIp.split('/')[0], dstMac, srcMac, ingress))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = ingress)
-        t.join()
-        assert_equal(self.success, True)
-
-
-
-############################# vrouter utility functiuons ####################
-    @classmethod
-    def vrouter_setup(cls):
-        apps = ('org.onosproject.proxyarp', 'org.onosproject.hostprovider', 'org.onosproject.vrouter', 'org.onosproject.fwd')
-        for app in apps:
-            OnosCtrl(app).activate()
-        cls.port_map, cls.port_list = cls.olt.olt_port_map()
-        cls.vrouter_device_dict = { "devices" : {
-                "{}".format(cls.device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-        cls.zebra_conf = '''
-password zebra
-log stdout
-service advanced-vty
-!
-!debug zebra rib
-!debug zebra kernel
-!debug zebra fpm
-!
-!interface eth1
-! ip address 10.10.0.3/16
-line vty
- exec-timeout 0 0
-'''
-    @classmethod
-    def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
-        log_test.info('Restarting Quagga container with configuration for %d networks' %(networks))
-        config = cls.generate_conf(networks = networks, peer_address = peer_address, router_address = router_address)
-        if networks <= 10000:
-            boot_delay = 25
-        else:
-            delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
-            n = min(networks/100000, len(delay_map)-1)
-            boot_delay = delay_map[n]
-        cord_test_quagga_restart(config = config, boot_delay = boot_delay)
-    @classmethod
-    def generate_vrouter_conf(cls, networks = 4, peers = 1, peer_address = None, router_address = None):
-        num = 0
-        if peer_address is None:
-           start_peer = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
-           end_peer =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        else:
-           ip = peer_address[0][0]
-           start_ip = ip.split('.')
-           start_peer = ( int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_peer =   ((int(start_ip[0]) + 8) << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        local_network = end_peer + 1
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        peer_list = []
-        for n in xrange(start_peer, end_peer, 256):
-            port_map = ports_dict['ports']
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 2
-            peer_ip = n + 1
-            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
-            peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
-            mac = RandMAC()._fix()
-            peer_list.append((peer, mac))
-            if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == peers:
-                break
-        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
-        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
-        quagga_router_dict['ospfEnabled'] = True
-        quagga_router_dict['interfaces'] = interface_list
-        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-
-        #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
-        bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
-        speaker_dict = {}
-        speaker_dict['name'] = 'bgp{}'.format(peers+1)
-        speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-        speaker_dict['peers'] = peer_list
-        bgp_speakers_list.append(speaker_dict)
-        cls.peer_list = peer_list
-        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
-    @classmethod
-    def generate_conf(cls, networks = 4, peer_address = None, router_address = None):
-        num = 0
-        if router_address is None:
-            start_network = ( 11 << 24) | ( 10 << 16) | ( 10 << 8) | 0
-            end_network =   ( 172 << 24 ) | ( 0 << 16)  | (0 << 8) | 0
-            network_mask = 24
-        else:
-           ip = router_address
-           start_ip = ip.split('.')
-           network_mask = int(start_ip[3].split('/')[1])
-           start_ip[3] = (start_ip[3].split('/'))[0]
-           start_network = (int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_network = (172 << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        net_list = []
-        peer_list = peer_address if peer_address is not None else cls.peer_list
-        network_list = []
-        for n in xrange(start_network, end_network, 256):
-            net = '%d.%d.%d.0'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
-            network_list.append(net)
-            gateway = peer_list[num % len(peer_list)][0]
-            net_route = 'ip route {0}/{1} {2}'.format(net, network_mask, gateway)
-            net_list.append(net_route)
-            num += 1
-            if num == networks:
-                break
-        cls.network_list = network_list
-        cls.network_mask = network_mask
-        zebra_routes = '\n'.join(net_list)
-        #log_test.info('Zebra routes: \n:%s\n' %cls.zebra_conf + zebra_routes)
-        return cls.zebra_conf + zebra_routes
-
-    @classmethod
-    def vrouter_host_load(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            log_test.info('Assigning ip %s to interface %s' %(host, iface))
-            config_cmds = ( 'ifconfig {} 0'.format(iface),
-                            'ifconfig {0} {1}'.format(iface, host),
-                            'arping -I {0} {1} -c 2'.format(iface, host),
-                            )
-            for cmd in config_cmds:
-                os.system(cmd)
-    @classmethod
-    def vrouter_host_unload(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            config_cmds = ('ifconfig {} 0'.format(iface), )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def vrouter_config_get(cls, networks = 4, peers = 1, peer_address = None,
-                           route_update = None, router_address = None):
-        vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers,
-                                                    peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    @classmethod
-    def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,
-                          route_update = None, router_address = None, time_expire = None, adding_new_routes = None):
-        vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,
-                                                 peer_address = peer_address, route_update = route_update)
-        cls.start_onos(network_cfg = vrouter_configs)
-        time.sleep(5)
-        cls.vrouter_host_load()
-        ##Start quagga
-        cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-    def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
-                  prn = recv_cb, iface = self.port_map[ingress])
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = src_mac, dst = dst_mac)
-        L3 = IP(src = src_ip, dst = dst_ip)
-        pkt = L2/L3
-        log_test.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
-                 (dst_ip, dst_mac, self.port_map[egress]))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = self.port_map[egress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def vrouter_traffic_verify(self, positive_test = True, peer_address = None):
-        if peer_address is None:
-            peers = len(self.peer_list)
-            peer_list = self.peer_list
-        else:
-            peers = len(peer_address)
-            peer_list = peer_address
-        egress = peers + 1
-        num = 0
-        num_hosts = 5 if positive_test else 1
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        if self.network_mask != 24:
-            peers = 1
-        for network in self.network_list:
-            num_ips = num_hosts
-            octets = network.split('.')
-            for i in xrange(num_ips):
-                octets[-1] = str(int(octets[-1]) + 1)
-                dst_ip = '.'.join(octets)
-                dst_mac = peer_list[ num % peers ] [1]
-                port = (num % peers)
-                ingress = port + 1
-                #Since peers are on the same network
-                ##Verify if flows are setup by sending traffic across
-                self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
-            num += 1
-    def vrouter_network_verify(self, networks, peers = 1, positive_test = True,
-                                 start_network = None, start_peer_address = None, route_update = None,
-                                 invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
-                                 deactivate_activate_vrouter = None, adding_new_routes = None):
-	print 'no.of networks are.....', networks
-        self.vrouter_setup()
-        _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
-                                                          peer_address = start_peer_address,
-                                                          route_update = route_update,
-                                                          router_address = start_network,
-                                                          time_expire = time_expire,
-                                                          adding_new_routes = adding_new_routes)
-        self.vrouter_traffic_verify()
-        self.vrouter_host_unload()
-        return True
-
-############### Cord Subscriber utility functions #########################
-
-    @classmethod
-    def flows_setup(cls):
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = cls.default_port_map
-        cls.device_id = OnosCtrl.get_device_id()
-        num_ports = len(cls.port_map['ports'] + cls.port_map['relay_ports'])
-        cls.port_offset = int(os.getenv('TEST_INSTANCE', 0)) * num_ports
-
-    @classmethod
-    def update_apps_version(cls):
-            version = Onos.getVersion()
-            major = int(version.split('.')[0])
-            minor = int(version.split('.')[1])
-            cordigmp_app_version = '2.0-SNAPSHOT'
-            olt_app_version = '1.2-SNAPSHOT'
-            if major > 1:
-                  cordigmp_app_version = '3.0-SNAPSHOT'
-                  olt_app_version = '2.0-SNAPSHOT'
-            elif major == 1:
-                  if minor > 10:
-                        cordigmp_app_version = '3.0-SNAPSHOT'
-                        olt_app_version = '2.0-SNAPSHOT'
-                  elif minor <= 8:
-                        olt_app_version = '1.1-SNAPSHOT'
-            cls.app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-{}.oar'.format(cordigmp_app_version))
-            cls.table_app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-multitable-{}.oar'.format(cordigmp_app_version))
-            cls.olt_app_file = os.path.join(cls.test_path, '..', 'apps/olt-app-{}.oar'.format(olt_app_version))
-
-    @classmethod
-    def subscriber_setup(cls):
-        log.info('in subscriber_setup function 000000000')
-	cls.subscriber_apps = ('org.opencord.aaa', 'org.onosproject.dhcp')
-        for app in cls.subscriber_apps:
-            OnosCtrl(app).activate()
-	cls.update_apps_version()
-        #dids = OnosCtrl.get_device_ids()
-        #device_map = {}
-        #for did in dids:
-        #      device_map[did] = { 'basic' : { 'driver' : 'voltha' } }
-        #network_cfg = {}
-        #network_cfg = { 'devices' : device_map }
-        #Restart ONOS with cpqd driver config for OVS
-        #cls.start_onos(network_cfg = network_cfg)
-        cls.port_map, cls.port_list = cls.olt.olt_port_map()
-        cls.switches = cls.port_map['switches']
-        cls.num_ports = cls.port_map['num_ports']
-        if cls.num_ports > 1:
-              cls.num_ports -= 1 ##account for the tx port
-        #Uninstall the existing app if any
-        #OnosCtrl.uninstall_app(cls.table_app)
-        #log_test.info('Installing the multi table app %s for subscriber test' %(cls.table_app_file))
-        #OnosCtrl.install_app(cls.table_app_file)
-
-    @classmethod
-    def subscriber_teardown(cls):
-        log.info('in subscriber_teardown function 000000000')
-        apps = cls.olt_apps + cls.subscriber_apps #( cls.table_app,)
-        for app in apps:
-            OnosCtrl(app).deactivate()
-        #cls.start_onos(network_cfg = {})
-        #OnosCtrl.uninstall_app(cls.table_app)
-        #log_test.info('Installing back the cord igmp app %s for subscriber test on exit' %(cls.app_file))
-        #OnosCtrl.install_app(cls.app_file)
-
-    @classmethod
-    def start_cpqd(cls, mac = '00:11:22:33:44:55'):
-            dpid = mac.replace(':', '')
-            cpqd_file = os.sep.join( (cls.cpqd_path, 'cpqd.sh') )
-            cpqd_cmd = '{} {}'.format(cpqd_file, dpid)
-            ret = os.system(cpqd_cmd)
-            assert_equal(ret, 0)
-            time.sleep(10)
-            device_id = 'of:{}{}'.format('0'*4, dpid)
-            return device_id
-
-    @classmethod
-    def start_ovs(cls):
-            ovs_file = os.sep.join( (cls.ovs_path, 'of-bridge.sh') )
-            ret = os.system(ovs_file)
-            assert_equal(ret, 0)
-            time.sleep(30)
-    @classmethod
-    def ovs_cleanup(cls):
-            log.info('executing ovs_cleanup function 000000000000000000')
-            ##For every test case, delete all the OVS groups
-            cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
-            try:
-                  cord_test_shell(cmd)
-                  ##Since olt config is used for this test, we just fire a careless local cmd as well
-                  os.system(cmd)
-            finally:
-                  return
-    def tls_verify(self, subscriber):
-            def tls_fail_cb():
-                  log_test.info('TLS verification failed')
-            if subscriber.has_service('TLS'):
-                  #OnosCtrl('org.opencord.aaa').deactivate()
-                  #time.sleep(2)
-                  #OnosCtrl('org.opencord.aaa').activate()
-                  #time.sleep(5)
-                  tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = subscriber.rx_intf)
-                  log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-                  tls.runTest()
-                  assert_equal(tls.failTest, False)
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  self.test_status = True
-                  return self.test_status
-
-    def generate_port_list(self, subscribers, channels):
-            log.info('port list in generate port list is %s'%self.port_list)
-            return self.port_list[:subscribers]
-    def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = [], services = None):
-          '''Load the subscriber from the database'''
-          log.info('executing subscriber_load finction 000000000')
-          test_services = services if services else self.test_services
-          self.subscriber_db = SubscriberDB(create = create, services = test_services)
-          if create is True:
-                self.subscriber_db.generate(num)
-          self.subscriber_info = self.subscriber_db.read(num)
-          self.subscriber_list = []
-          if not port_list:
-                port_list = self.generate_port_list(num, num_channels)
-          log.info('port_list in subscriber load is %s'%port_list)
-          index = 0
-          for info in self.subscriber_info:
-                self.subscriber_list.append(Subscriber(name=info['Name'],
-                                                         service=info['Service'],
-                                                        port_map = self.port_map,
-                                                         num=num_channels,
-                                                         channel_start = channel_start,
-                                                         tx_port = port_list[index][0],
-                                                         rx_port = port_list[index][1]))
-                if num_channels > 1:
-                      channel_start += num_channels
-                index += 1
-          #load the ssm list for all subscriber channels
-          igmpChannel = IgmpChannel()
-          ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
-          ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
-          igmpChannel.igmp_load_ssm_config(ssm_list)
-    def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1,
-                                  channel_start = 0, cbs = None, port_list = [],
-                                  services = None, negative_subscriber_auth = None):
-        log.info('in subscriber_join_verify function 000000000')
-        self.test_status = False
-        self.ovs_cleanup()
-        subscribers_count = num_subscribers
-        sub_loop_count =  num_subscribers
-        self.subscriber_load(create = True, num = num_subscribers,
-                             num_channels = num_channels, channel_start = channel_start, port_list = port_list,
-                             services = services)
-        self.onos_aaa_config()
-        self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
-        chan_leave = False #for single channel, multiple subscribers
-        if cbs is None:
-              cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-              chan_leave = True
-        cbs_negative = cbs
-        for subscriber in self.subscriber_list:
-              if services and 'IGMP' in services:
-                 subscriber.start()
-              if negative_subscriber_auth is 'half' and sub_loop_count%2 is not 0:
-                 cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-              elif negative_subscriber_auth is 'onethird' and sub_loop_count%3 is not 0:
-                 cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify, self.traffic_verify)
-              else:
-                 cbs = cbs_negative
-              sub_loop_count = sub_loop_count - 1
-              pool_object = subscriber_pool(subscriber, cbs)
-              self.thread_pool.addTask(pool_object.pool_cb)
-        self.thread_pool.cleanUpThreads()
-        for subscriber in self.subscriber_list:
-              if services and 'IGMP' in services:
-                 subscriber.stop()
-              if chan_leave is True:
-                    subscriber.channel_leave(0)
-        subscribers_count = 0
-        return self.test_status
-    def tls_invalid_cert(self, subscriber):
-          log.info('in tls_invalid_cert function 000000000000000')
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_INVALID)
-             tls.runTest()
-             if tls.failTest == True:
-                self.test_status = False
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-    def tls_verify(self, subscriber):
-            def tls_fail_cb():
-                  log_test.info('TLS verification failed')
-            if subscriber.has_service('TLS'):
-                  tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = subscriber.rx_intf)
-                  log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-                  tls.runTest()
-                  assert_equal(tls.failTest, False)
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  self.test_status = True
-                  return self.test_status
-
-    def tls_non_ca_authrized_cert(self, subscriber):
-          if subscriber.has_service('TLS'):
-             time.sleep(2)
-             log_test.info('Running subscriber %s tls auth test' %subscriber.name)
-             tls = TLSAuthTest(client_cert = self.CLIENT_CERT_NON_CA_AUTHORIZED)
-             tls.runTest()
-             if tls.failTest == False:
-                self.test_status = True
-             return self.test_status
-          else:
-              self.test_status = True
-              return self.test_status
-
-    def dhcp_verify(self, subscriber):
-            log.info('in dhcp_verify function 000000000000000')
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, update_seed = True)
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-    def dhcp_jump_verify(self, subscriber):
-            if subscriber.has_service('DHCP'):
-                  cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
-                  log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
-                  subscriber.src_list = [cip]
-                  self.test_status = True
-                  return self.test_status
-            else:
-                  subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
-                  self.test_status = True
-                  return self.test_status
-
-    def igmp_verify(self, subscriber):
-            log.info('in igmp_verify function 000000000000000')
-            chan = 0
-            if subscriber.has_service('IGMP'):
-                  ##We wait for all the subscribers to join before triggering leaves
-                  if subscriber.rx_port > 1:
-                        time.sleep(5)
-                  subscriber.channel_join(chan, delay = 0)
-                  self.num_joins += 1
-                  while self.num_joins < self.num_subscribers:
-                        time.sleep(5)
-                  log_test.info('All subscribers have joined the channel')
-                  for i in range(10):
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        log_test.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_leave(chan)
-                        time.sleep(5)
-                        log_test.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
-                        #Should not receive packets for this subscriber
-                        self.recv_timeout = True
-                        subscriber.recv_timeout = True
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-                        subscriber.recv_timeout = False
-                        self.recv_timeout = False
-                        log_test.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
-                        subscriber.channel_join(chan, delay = 0)
-                  self.test_status = True
-                  return self.test_status
-
-    def igmp_jump_verify(self, subscriber):
-            if subscriber.has_service('IGMP'):
-                  for i in xrange(subscriber.num):
-                        log_test.info('Subscriber %s jumping channel' %subscriber.name)
-                        chan = subscriber.channel_jump(delay=0)
-                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-                        log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-                        time.sleep(3)
-                  log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-                  self.test_status = True
-                  return self.test_status
-    def traffic_verify(self, subscriber):
-            if subscriber.has_service('TRAFFIC'):
-                  url = 'http://www.google.com'
-                  resp = requests.get(url)
-                  self.test_status = resp.ok
-                  if resp.ok == False:
-                        log_test.info('Subscriber %s failed get from url %s with status code %d'
-                                 %(subscriber.name, url, resp.status_code))
-                  else:
-                        log_test.info('GET request from %s succeeded for subscriber %s'
-                                 %(url, subscriber.name))
-                  return self.test_status
-################## common utility functions #######################
-    def get_system_cpu_usage(self):
-        """ Getting compute node CPU usage """
-        ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-        cmd = "top -b -n1 | grep 'Cpu(s)' | awk '{print $2 + $4}'"
-        status, output = ssh_agent.run_cmd(cmd)
-        assert_equal(status, True)
-        return float(output)
-
-    @classmethod
-    def start_onos(cls, network_cfg = None):
-        if type(network_cfg) is tuple:
-            res = []
-            for v in network_cfg:
-                res += v.items()
-            config = dict(res)
-        else:
-            config = network_cfg
-        log_test.info('Restarting ONOS with new network configuration')
-        return cord_test_onos_restart(config = config)
-
-    @classmethod
-    def config_restore(cls):
-        """Restore the vsg test configuration on test case failures"""
-        for restore_method in cls.restore_methods:
-            restore_method()
-
-    def onos_aaa_config(self):
-        OnosCtrl.aaa_load_config()
-
-    def onos_load_config(self, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('Configure request for AAA returned status %d' %code)
-            assert_equal(status, True)
-            time.sleep(3)
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    def incmac(self, mac):
-        tmp =  str(hex(int('0x'+mac,16)+1).split('x')[1])
-        mac = '0'+ tmp if len(tmp) < 2 else tmp
-        return mac
-
-    def next_mac(self, mac):
-        mac = mac.split(":")
-        mac[5] = self.incmac(mac[5])
-
-        if len(mac[5]) > 2:
-           mac[0] = self.incmac(mac[0])
-           mac[5] = '01'
-
-        if len(mac[0]) > 2:
-           mac[0] = '01'
-           mac[1] = self.incmac(mac[1])
-           mac[5] = '01'
-        return ':'.join(mac)
-
-    def to_egress_mac(cls, mac):
-        mac = mac.split(":")
-        mac[4] = '01'
-        return ':'.join(mac)
-
-    def inc_ip(self, ip, i):
-
-        ip[i] =str(int(ip[i])+1)
-        return '.'.join(ip)
-    def next_ip(self, ip):
-
-        lst = ip.split('.')
-        for i in (3,0,-1):
-            if int(lst[i]) < 255:
-               return self.inc_ip(lst, i)
-            elif int(lst[i]) == 255:
-               lst[i] = '0'
-               if int(lst[i-1]) < 255:
-                  return self.inc_ip(lst,i-1)
-               elif int(lst[i-2]) < 255:
-                  lst[i-1] = '0'
-                  return self.inc_ip(lst,i-2)
-               else:
-                  break
-
-    def to_egress_ip(self, ip):
-        lst=ip.split('.')
-        lst[0] = '182'
-        return '.'.join(lst)
diff --git a/src/test/utils/Stats.py b/src/test/utils/Stats.py
deleted file mode 100644
index dd5efe3..0000000
--- a/src/test/utils/Stats.py
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from math import sqrt
-
-class Stats:
-      def __init__(self):
-            self.count = 0
-            self.start = 0
-            self.delta = 0
-            self.min = 0
-            self.max = 0
-            self.delta_squares = 0
-
-      def update(self, packets = 0, t = 0, usecs = False):
-            self.count += packets
-            if usecs == False:
-                  t *= 1000000 ##convert to usecs
-            if self.start == 0:
-                  self.start = t
-            self.delta += t
-            self.delta_squares += t*t
-            if self.min == 0 or t < self.min:
-                  self.min = t
-            if self.max == 0 or t > self.max:
-                  self.max = t
-
-      def __repr__(self):
-            if self.count == 0:
-                  self.count = 1
-            mean = self.delta/self.count
-            mean_square = mean*mean
-            delta_square_mean = self.delta_squares/self.count
-            std_mean = sqrt(delta_square_mean - mean_square)
-            r = 'Avg %.3f usecs, Std deviation %.3f usecs, Min %.3f, Max %.3f for %d packets\n' %(
-                  mean, std_mean, self.min, self.max, self.count)
-            return r
-
diff --git a/src/test/utils/TestManifest.py b/src/test/utils/TestManifest.py
deleted file mode 100644
index 72876f4..0000000
--- a/src/test/utils/TestManifest.py
+++ /dev/null
@@ -1,113 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import json
-import os
-import shutil
-import platform
-from CordTestServer import CORD_TEST_HOST, CORD_TEST_PORT
-
-class TestManifest(object):
-
-    def __init__(self, manifest = None, args = None):
-        self.manifest = manifest
-        if args is not None and manifest is None:
-            self.onos_ip = None
-            self.radius_ip = None
-            self.head_node = platform.node()
-            self.log_level = args.log_level.upper()
-            self.onos_instances = args.onos_instances
-            self.async_mode = args.async_mode
-            self.shared_volume = args.shared_volume
-            self.olt = args.olt
-            self.olt_config = args.olt_config
-            self.start_switch = args.start_switch
-            self.setup_dhcpd = args.setup_dhcpd
-            self.image_prefix = args.prefix
-            self.onos_image = args.onos
-            self.test_controller = args.test_controller
-            if self.test_controller:
-                ips = self.test_controller.split('/')
-                self.onos_ip = ips[0]
-                if len(ips) > 1:
-                    self.radius_ip = ips[1]
-            self.onos_cord = args.onos_cord if args.onos_cord else None
-            self.service_profile = args.service_profile if args.service_profile else None
-            self.synchronizer = args.synchronizer if args.synchronizer else None
-            self.docker_network = args.network if args.network else None
-            self.iterations = None
-            self.server = args.server
-            self.jvm_heap_size = args.jvm_heap_size if args.jvm_heap_size else None
-            self.karaf_version = args.karaf
-            self.voltha_loc = args.voltha_loc
-            self.voltha_intf = args.voltha_intf
-            self.voltha_enable = args.voltha_enable
-            self.voltha_container_mode = args.voltha_container_mode
-            self.expose_port = args.expose_port
-            self.skip_onos_restart = args.skip_onos_restart
-        else:
-            with open(self.manifest, 'r') as fd:
-                data = json.load(fd)
-            self.onos_ip = data.get('onos', None)
-            self.radius_ip = data.get('radius', None)
-            self.test_controller = '' if self.onos_ip is None else self.onos_ip
-            if self.onos_ip and self.radius_ip:
-                self.test_controller = '{}/{}'.format(self.onos_ip, self.radius_ip)
-            self.onos_cord = data.get('onos_cord', None)
-            self.service_profile = data.get('service_profile', None)
-            self.synchronizer = data.get('synchronizer', None)
-            self.head_node = data.get('head_node', platform.node())
-            self.log_level = data.get('log_level', 'INFO').upper()
-            self.onos_instances = data.get('onos_instances', 1)
-            self.shared_volume = data.get('shared_volume', True)
-            self.async_mode = True if self.onos_instances > 1 else False
-            self.olt = data.get('olt', True)
-            self.olt_config = data.get('olt_config', 'olt_config.json')
-            self.start_switch = data.get('start_switch', self.olt)
-            self.setup_dhcpd = data.get('setup_dhcpd', self.start_switch)
-            self.image_prefix = data.get('image_prefix', '')
-            self.onos_image = data.get('onos_image', 'onosproject/onos:latest')
-            self.docker_network = data.get('docker_network', None)
-            self.server = data.get('test_server', '{}:{}'.format(CORD_TEST_HOST, CORD_TEST_PORT))
-            self.iterations = data.get('iterations', None)
-            self.jvm_heap_size = data.get('jvm_heap_size', None)
-            self.karaf_version = data.get('karaf_version', '3.0.8')
-            self.voltha_loc = data.get('voltha_loc', '')
-            self.voltha_intf = data.get('voltha_intf', 'eth0')
-            voltha_enable = False
-            if self.voltha_loc:
-                voltha_enable = True
-            self.voltha_enable = data.get('voltha_enable', voltha_enable)
-            self.voltha_container_mode = data.get('voltha_container_mode', True)
-            self.expose_port = data.get('expose_port', False)
-            if self.voltha_enable and self.voltha_container_mode:
-                self.expose_port = True
-            self.skip_onos_restart = data.get('skip_onos_restart', False)
diff --git a/src/test/utils/VSGAccess.py b/src/test/utils/VSGAccess.py
deleted file mode 100644
index e179d29..0000000
--- a/src/test/utils/VSGAccess.py
+++ /dev/null
@@ -1,519 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import shutil
-import re
-from novaclient import client as nova_client
-from SSHTestAgent import SSHTestAgent
-from CordTestUtils import *
-from CordTestUtils import log_test as log
-
-log.setLevel('INFO')
-
-class VSGAccess(object):
-
-    vcpe_map = {}
-    interface_map = {}
-    ip_addr_pattern = re.compile('[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/[0-9]{1,2}$')
-
-    @classmethod
-    def setUp(cls):
-        try:
-            shutil.copy('/etc/resolv.conf', '/etc/resolv.conf.orig')
-        except:
-            pass
-
-    @classmethod
-    def tearDown(cls):
-        try:
-            shutil.copy('/etc/resolv.conf.orig', '/etc/resolv.conf')
-        except:
-            pass
-
-    '''
-    @method: get_nova_credentials_v2
-    @Description: Get nova credentials
-    @params:
-    returns credential from env
-    '''
-    @classmethod
-    def get_nova_credentials_v2(cls):
-        credential = {}
-        credential['username'] = os.environ['OS_USERNAME']
-        credential['api_key'] = os.environ['OS_PASSWORD']
-        credential['auth_url'] = os.environ['OS_AUTH_URL']
-        credential['project_id'] = os.environ['OS_TENANT_NAME']
-        return credential
-
-    '''
-    @method: get_compute_nodes
-    @Description: Get the list of compute nodes
-    @params:
-    returns  node list
-    '''
-    @classmethod
-    def get_compute_nodes(cls):
-        credentials = cls.get_nova_credentials_v2()
-        nvclient = nova_client.Client('2', **credentials)
-        return nvclient.hypervisors.list()
-
-    '''
-    @method: get_vsgs
-    @Description: Get list of vsg's running in compute node
-    @params: status of vsg
-    returns vsg wrappers
-    '''
-    @classmethod
-    def get_vsgs(cls, active = True):
-        credentials = cls.get_nova_credentials_v2()
-        nvclient = nova_client.Client('2', **credentials)
-        vsgs = nvclient.servers.list(search_opts = {'all_tenants': 1})
-        if active is True:
-            vsgs = filter(lambda vsg: vsg.status == 'ACTIVE', vsgs)
-        vsg_wrappers = []
-        for vsg in vsgs:
-            vsg_wrappers.append(VSGWrapper(vsg))
-        return vsg_wrappers
-
-    '''
-    @method: open_mgmt
-    @Description: Bringing up Interface for access to management
-    @params: intf = "Interface to open"
-    returns Gateway
-    '''
-    @classmethod
-    def open_mgmt(cls, intf = 'eth0'):
-        if intf in cls.interface_map:
-            gw = cls.interface_map[intf]['gw']
-            ip = cls.interface_map[intf]['ip']
-            if gw != '0.0.0.0':
-                current_gw, _ = get_default_gw()
-                cmds = [ 'route del default gw {}'.format(current_gw),
-                         'ifconfig {} {} up'.format(intf, ip),
-                         'route add default gw {}'.format(gw) ]
-                for cmd in cmds:
-                    os.system(cmd)
-                shutil.copy('/etc/resolv.conf', '/etc/resolv.conf.lastdhcp')
-                shutil.copy('/etc/resolv.conf.orig', '/etc/resolv.conf')
-                return current_gw
-        return None
-
-    '''
-    @method: close_mgmt
-    @Description: Bringing up gateway deleting default
-    @params: intf = "Interface to open"
-             dict2 = retrieved data from GET method
-    returns: NA
-    '''
-    @classmethod
-    def close_mgmt(cls, restore_gw, intf = 'eth0'):
-        if restore_gw:
-            cmds = [ 'route del default gw 0.0.0.0',
-                     'route add default gw {}'.format(restore_gw),
-                     'cp /etc/resolv.conf.lastdhcp /etc/resolv.conf',
-                     'rm -f /etc/resolv.conf.lastdhcp'
-                     ]
-            for cmd in cmds:
-                os.system(cmd)
-
-    '''
-    @method: health_check
-    @Description: Check if vsgs are reachable
-    @params:
-    returns True
-    '''
-    @classmethod
-    def health_check(cls):
-        '''Returns 0 if all active vsgs are reachable through the compute node'''
-        vsgs = cls.get_vsgs()
-        vsg_status = []
-        for vsg in vsgs:
-            vsg_status.append(vsg.get_health())
-        unreachable = filter(lambda st: st == False, vsg_status)
-        return len(unreachable) == 0
-
-    '''
-    @method: get_vcpe_vsg
-    @Description: Getting vsg vm instance info from given vcpe
-    @params: vcpe = "vcpe name"
-    returns vsg
-    '''
-    @classmethod
-    def get_vcpe_vsg(cls, vcpe):
-        '''Find the vsg hosting the vcpe service'''
-        if vcpe in cls.vcpe_map:
-            return cls.vcpe_map[vcpe]['vsg']
-        vsgs = cls.get_vsgs()
-        for vsg in vsgs:
-            cmd = 'sudo docker exec {} ls 2>/dev/null'.format(vcpe)
-            st, _ = vsg.run_cmd(cmd, timeout = 30)
-            if st == True:
-                return vsg
-        return None
-
-    '''
-    @method: save_vcpe_config
-    @Description: Saving vcpe config with lan & wan side info
-    @params: vsg
-             vcpe
-    returns True
-    '''
-    @classmethod
-    def save_vcpe_config(cls, vsg, vcpe):
-        if vcpe not in cls.vcpe_map:
-            cmd_gw = "sudo docker exec %s ip route show | grep default | head -1 | awk '{print $3}'" %(vcpe)
-            vsg_ip = vsg.ip
-            if vsg_ip is None:
-                return False
-            st, output = vsg.run_cmd(cmd_gw, timeout = 30)
-            if st == False or not output:
-                return False
-            gw = output
-            cmd_wan = "sudo docker exec %s ip addr show eth0 |grep inet |head -1 | tr -s ' ' | awk '{print $2}' | awk '{print $1}'" %(vcpe)
-            cmd_lan = "sudo docker exec %s ip addr show eth1 |grep inet |head -1 | tr -s ' ' | awk '{print $2}' | awk '{print $1}'" %(vcpe)
-            st, output = vsg.run_cmd(cmd_wan, timeout = 30)
-            ip_wan = '0.0.0.0/24'
-            ip_lan = '0.0.0.0/24'
-            if st and output:
-                if cls.ip_addr_pattern.match(output):
-                    ip_wan = output
-
-            st, output = vsg.run_cmd(cmd_lan, timeout = 30)
-            if st and output:
-                if cls.ip_addr_pattern.match(output):
-                    ip_lan = output
-
-            cls.vcpe_map[vcpe] = { 'vsg': vsg, 'gw': gw, 'wan': ip_wan, 'lan': ip_lan }
-
-        return True
-
-    '''
-    @method: restore_vcpe_config
-    @Description: Restoring saved config for lan & wan
-    @params: vcpe
-             gw
-             wan
-             lan
-    returns True/False
-    '''
-    @classmethod
-    def restore_vcpe_config(cls, vcpe, gw = True, wan = False, lan = False):
-        if vcpe in cls.vcpe_map:
-            vsg = cls.vcpe_map[vcpe]['vsg']
-            cmds = []
-            if gw is True:
-                #restore default gw
-                gw = cls.vcpe_map[vcpe]['gw']
-                cmds.append('sudo docker exec {} ip link set eth0 up'.format(vcpe))
-                cmds.append('sudo docker exec {} route add default gw {} dev eth0'.format(vcpe, gw))
-            if wan is True:
-                ip_wan = cls.vcpe_map[vcpe]['wan']
-                cmds.append('sudo docker exec {} ip addr set {} dev eth0'.format(vcpe, ip_wan))
-            if lan is True:
-                ip_lan = cls.vcpe_map[vcpe]['lan']
-                cmds.append('sudo docker exec {} ip addr set {} dev eth1'.format(vcpe, ip_lan))
-            ret_status = True
-            for cmd in cmds:
-                st, _ = vsg.run_cmd(cmd, timeout = 30)
-                if st == False:
-                    ret_status = False
-            return ret_status
-        return False
-
-    '''
-    @method: get_vcpe_gw
-    @Description: Get gw of vcpe from created map
-    @params: vcpe
-    returns gw
-    '''
-    @classmethod
-    def get_vcpe_gw(cls, vcpe):
-        if vcpe in cls.vcpe_map:
-            return cls.vcpe_map[vcpe]['gw']
-        return None
-
-    '''
-    @method: get_vcpe_wan
-    @Description:
-    @params:
-    return wan side of vcpe
-    '''
-    @classmethod
-    def get_vcpe_wan(cls, vcpe):
-        if vcpe in cls.vcpe_map:
-            return cls.vcpe_map[vcpe]['wan']
-        return None
-
-    '''
-    @method: get_vcpe_lan
-    @Description:
-    @params:
-    returns True if contents of dict1 exists in dict2
-    '''
-    @classmethod
-    def get_vcpe_lan(cls, vcpe):
-        if vcpe in cls.vcpe_map:
-            return cls.vcpe_map[vcpe]['lan']
-        return None
-
-    '''
-    @method: vcpe_wan_up
-    @Description:
-    @params:
-    returns status
-    '''
-    @classmethod
-    def vcpe_wan_up(cls, vcpe):
-        return cls.restore_vcpe_config(vcpe)
-
-    '''
-    @method: vcpe_lan_up
-    @Description:
-    @params:
-    returns status
-    '''
-    @classmethod
-    def vcpe_lan_up(cls, vcpe, vsg = None):
-        if vsg is None:
-            vsg = cls.get_vcpe_vsg(vcpe)
-            if vsg is None:
-                return False
-        cmd = 'sudo docker exec {} ip link set eth1 up'.format(vcpe)
-        st, _ = vsg.run_cmd(cmd, timeout = 30)
-        return st
-
-    '''
-    @method: vcpe_port_down
-    @Description:
-    @params:
-    returns status
-    '''
-    #we cannot access compute node if the vcpe port gets dhcp as default would be through fabric
-    @classmethod
-    def vcpe_port_down(cls, vcpe, port, vsg = None):
-        if vsg is None:
-            vsg = cls.get_vcpe_vsg(vcpe)
-            if vsg is None:
-                return False
-        if not cls.save_vcpe_config(vsg, vcpe):
-            return False
-        cmd = 'sudo docker exec {} ip link set {} down'.format(vcpe, port)
-        st, _ = vsg.run_cmd(cmd, timeout = 30)
-        if st is False:
-            cls.restore_vcpe_config(vcpe)
-            return False
-        return st
-
-    '''
-    @method: vcpe_wan_down
-    @Description:
-    @params:
-    returns status
-    '''
-    @classmethod
-    def vcpe_wan_down(cls, vcpe, vsg = None):
-        return cls.vcpe_port_down(vcpe, 'eth0', vsg = vsg)
-
-    '''
-    @method: vcpe_lan_down
-    @Description:
-    @params:
-    returns status
-    '''
-    @classmethod
-    def vcpe_lan_down(cls, vcpe, vsg = None):
-        return cls.vcpe_port_down(vcpe, 'eth1', vsg = vsg)
-
-    '''
-    @method: save_interface_config
-    @Description:
-    @params:
-    returns NA
-    '''
-    @classmethod
-    def save_interface_config(cls, intf):
-        if intf not in cls.interface_map:
-            ip = get_ip(intf)
-            if ip is None:
-                ip = '0.0.0.0'
-            default_gw, default_gw_device = get_default_gw()
-            if default_gw_device != intf:
-                default_gw = '0.0.0.0'
-            cls.interface_map[intf] = { 'ip' : ip, 'gw': default_gw }
-            #bounce the interface to remove default gw
-            cmds = ['ifconfig {} 0 down'.format(intf),
-                    'ifconfig {} 0 up'.format(intf)
-                    ]
-            for cmd in cmds:
-                os.system(cmd)
-
-    '''
-    @method: restore_interface_config
-    @Description:
-    @params:
-    returns NA
-    '''
-    #open up access to compute node
-    @classmethod
-    def restore_interface_config(cls, intf, vcpe = None):
-        if intf in cls.interface_map:
-            ip = cls.interface_map[intf]['ip']
-            gw = cls.interface_map[intf]['gw']
-            del cls.interface_map[intf]
-            cmds = []
-            if vcpe is not None:
-                shutil.copy('/etc/resolv.conf.orig', '/etc/resolv.conf')
-                #bounce the vcpes to clear default gw
-                cmds.append('ifconfig {} 0 down'.format(vcpe))
-                cmds.append('ifconfig {} 0 up'.format(vcpe))
-            cmds.append('ifconfig {} {} up'.format(intf, ip))
-            if gw and gw != '0.0.0.0':
-                cmds.append('route add default gw {} dev {}'.format(gw, intf))
-            for cmd in cmds:
-                os.system(cmd)
-
-    '''
-    @method: vcpe_get_dhcp
-    @Description: Get DHCP from vcpe dhcp interface.
-    @params:
-    returns vcpe ip
-    '''
-    @classmethod
-    def vcpe_get_dhcp(cls, vcpe, mgmt = 'eth0'):
-        '''Get DHCP from vcpe dhcp interface.'''
-        '''We have to also save the management interface config for restoration'''
-        cls.save_interface_config(mgmt)
-        getstatusoutput('pkill -9 dhclient')
-        st, output = getstatusoutput('dhclient -q {}'.format(vcpe))
-        getstatusoutput('pkill -9 dhclient')
-        vcpe_ip = get_ip(vcpe)
-        if vcpe_ip is None:
-            cls.restore_interface_config(mgmt)
-            return None
-        if output:
-            #workaround for docker container apparmor that prevents moving dhclient resolv.conf
-            start = output.find('/etc/resolv.conf')
-            if start >= 0:
-                end = output.find("'", start)
-                dns_file = output[start:end]
-                if os.access(dns_file, os.F_OK):
-                    shutil.copy(dns_file, '/etc/resolv.conf')
-
-        default_gw, default_gw_device = get_default_gw()
-        if default_gw and default_gw_device == vcpe:
-            return vcpe_ip
-        cls.restore_interface_config(mgmt, vcpe = vcpe)
-        return None
-
-class VSGWrapper(object):
-
-    def __init__(self, vsg):
-        self.vsg = vsg
-        self.name = self.vsg.name
-        self.compute_node = self.get_compute_node()
-        self.ip = self.get_ip()
-
-    '''
-    @method: get_compute_node
-    @Description:
-    @params:
-    returns compute node name
-    '''
-    def get_compute_node(self):
-        return self.vsg._info['OS-EXT-SRV-ATTR:hypervisor_hostname']
-
-    '''
-    @method: get_ip
-    @Description:
-    @params:
-    returns ip of network
-    '''
-    def get_ip(self):
-        if 'management' in self.vsg.networks:
-            ips = self.vsg.networks['management']
-            if len(ips) > 0:
-                return ips[0]
-        return None
-
-    '''
-    @method: run_cmd_compute
-    @Description:
-    @params:
-    returns Status & output
-    '''
-    def run_cmd_compute(self, cmd, timeout = 5):
-        ssh_agent = SSHTestAgent(self.compute_node)
-        st, output = ssh_agent.run_cmd(cmd, timeout = timeout)
-        if st == True and output:
-            output = output.strip()
-        else:
-            output = None
-
-        return st, output
-
-    '''
-    @method: run_cmd
-    @Description:
-    @params:
-    returns status & output
-    '''
-    def run_cmd(self, cmd, timeout = 5, mgmt = 'eth0'):
-        last_gw = VSGAccess.open_mgmt(mgmt)
-        ssh_agent = SSHTestAgent(self.compute_node)
-        ssh_cmd = 'ssh {} {}'.format(self.ip, cmd)
-        st, output = ssh_agent.run_cmd(ssh_cmd, timeout = timeout)
-        if st == True and output:
-            output = output.strip()
-        else:
-            output = None
-        VSGAccess.close_mgmt(last_gw, mgmt)
-        return st, output
-
-    '''
-    @method: get_health
-    @Description:
-    @params:
-    returns Status
-    '''
-    def get_health(self):
-        if self.ip is None:
-            return True
-        cmd = 'ping -c 1 {}'.format(self.ip)
-        log.info('Pinging VSG %s at IP %s' %(self.name, self.ip))
-        st, _ = self.run_cmd_compute(cmd)
-        log.info('VSG %s at IP %s is %s through compute node %s' %(self.name, self.ip, 'reachable' if st == True else 'unreachable', self.compute_node))
-        return st
-
-    '''
-    @method: check_access
-    @Description: validates access
-    @params:
-    returns Status
-    '''
-    def check_access(self):
-        if self.ip is None:
-            return True
-        ssh_agent = SSHTestAgent(self.compute_node)
-        st, _ = ssh_agent.run_cmd('ls', timeout=10)
-        if st == False:
-            log.error('Compute node at %s is not accessible' %(self.compute_node))
-            return st
-        log.info('Checking if VSG at %s is accessible from compute node %s' %(self.ip, self.compute_node))
-        st, _ = ssh_agent.run_cmd('ssh {} ls'.format(self.ip), timeout=30)
-        if st == True:
-            log.info('OK')
-        return st
diff --git a/src/test/utils/VolthaCtrl.py b/src/test/utils/VolthaCtrl.py
deleted file mode 100644
index 0a73551..0000000
--- a/src/test/utils/VolthaCtrl.py
+++ /dev/null
@@ -1,497 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import requests
-import json
-import time
-import os
-import signal
-from CordTestUtils import log_test as log, getstatusoutput, get_controller
-from CordContainer import Container, Onos
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-
-class VolthaService(object):
-    services = ('vconsul', 'kafka', 'zookeeper', 'registrator', 'fluentd')
-    standalone_services = ('envoy', 'voltha', 'ofagent', 'cli',)
-    compose_file = 'docker-compose-system-test.yml'
-    service_map = {}
-    PROJECT = 'cordtester'
-    NETWORK = '{}_default'.format(PROJECT)
-    CONTAINER_MODE = False
-    REST_SERVICE = 'chameleon'
-    DOCKER_HOST_IP = '172.17.0.1'
-    PONSIM_HOST = '172.17.0.1'
-
-    def __init__(self, voltha_loc, controller, interface = 'eth0', olt_config = None, container_mode = False):
-        if not os.access(voltha_loc, os.F_OK):
-            raise Exception('Voltha location %s not found' %voltha_loc)
-        compose_file_loc = os.path.join(voltha_loc, 'compose', self.compose_file)
-        if not os.access(compose_file_loc, os.F_OK):
-            raise Exception('Voltha compose file %s not found' %compose_file_loc)
-        self.voltha_loc = voltha_loc
-        self.controller = controller
-        self.interface = interface
-        self.compose_file_loc = compose_file_loc
-        VolthaService.CONTAINER_MODE = container_mode
-        num_onus = 1
-        if olt_config is not None:
-            port_map, _ = OltConfig(olt_config).olt_port_map()
-            if port_map['ponsim'] is True:
-                num_onus = max(1, len(port_map['ports']))
-        self.num_onus = num_onus
-
-    def start_services(self, *services):
-        services_fmt = ' {}' * len(services)
-        services_cmd_fmt = 'DOCKER_HOST_IP={} docker-compose -p {} -f {} up -d {}'.format(self.DOCKER_HOST_IP,
-                                                                                          self.PROJECT,
-                                                                                          self.compose_file_loc,
-                                                                                          services_fmt)
-        start_cmd = services_cmd_fmt.format(*services)
-        ret = os.system(start_cmd)
-        if ret != 0:
-            raise Exception('Failed to start voltha services. Failed with code %d' %ret)
-
-        for service in services:
-            name = '{}_{}_1'.format(self.PROJECT, service)
-            cnt = Container(name, name)
-            ip = cnt.ip(network = self.NETWORK)
-            if not ip:
-                raise Exception('IP not found for container %s' %name)
-            print('IP %s for service %s' %(ip, service))
-            self.service_map[service] = dict(name = name, network = self.NETWORK, ip = ip)
-
-    def ponmgmt_enable(self):
-        cmds = ('echo 8 | tee /sys/class/net/ponmgmt/bridge/group_fwd_mask',
-                'brctl addif ponmgmt {} >/dev/null 2>&1'.format(self.interface),
-        )
-        for cmd in cmds:
-            try:
-                os.system(cmd)
-            except:
-                pass
-
-    def start(self):
-        self.start_services(*self.services)
-        if self.CONTAINER_MODE is True:
-            self.start_services(*self.standalone_services)
-            #enable multicast mac forwarding:
-            self.ponmgmt_enable()
-            time.sleep(10)
-            chameleon_start_cmd = "cd {} && sh -c '. ./env.sh && \
-            nohup python chameleon/main.py -v --consul={}:8500 \
-            --fluentd={}:24224 --grpc-endpoint={}:50555 \
-            >/tmp/chameleon.log 2>&1 &'".format(self.voltha_loc,
-                                                self.get_ip('consul'),
-                                                self.get_ip('fluentd'),
-                                                self.get_ip('voltha'))
-        else:
-            #first start chameleon on the host as its only the reliable way for REST
-            chameleon_start_cmd = "cd {} && sh -c '. ./env.sh && \
-            nohup python chameleon/main.py -v --consul=localhost:8500 \
-            --fluentd={}:24224 --grpc-endpoint=localhost:50555 \
-            >/tmp/chameleon.log 2>&1 &'".format(self.voltha_loc,
-                                                self.get_ip('fluentd'))
-        if not self.service_running('python chameleon/main.py'):
-            ret = os.system(chameleon_start_cmd)
-            if ret != 0:
-                raise Exception('VOLTHA chameleon service not started. Failed with return code %d' %ret)
-            time.sleep(10)
-        else:
-            print('Chameleon voltha sevice is already running. Skipped start')
-
-        if self.CONTAINER_MODE is False:
-            #now start voltha and ofagent
-            voltha_setup_cmd = "cd {} && sh -c '. ./env.sh && make rebuild-venv && make protos'".format(self.voltha_loc)
-            voltha_start_cmd = "cd {} && sh -c '. ./env.sh && \
-            nohup python voltha/main.py -v --consul=localhost:8500 --kafka={}:9092 -I {} \
-            --fluentd={}:24224 --rest-port=8880 --grpc-port=50555 \
-            >/tmp/voltha.log 2>&1 &'".format(self.voltha_loc,
-                                             self.service_map['kafka']['ip'],
-                                             self.interface,
-                                             self.service_map['fluentd']['ip'])
-            pki_dir = '{}/pki'.format(self.voltha_loc)
-            if not self.service_running('python voltha/main.py'):
-                voltha_pki_dir = '/voltha'
-                if os.access(pki_dir, os.F_OK):
-                    pki_xfer_cmd = 'mkdir -p {} && cp -rv {}/pki {}'.format(voltha_pki_dir,
-                                                                            self.voltha_loc,
-                                                                            voltha_pki_dir)
-                    os.system(pki_xfer_cmd)
-                #os.system(voltha_setup_cmd)
-                ret = os.system(voltha_start_cmd)
-                if ret != 0:
-                    raise Exception('Failed to start VOLTHA. Return code %d' %ret)
-                time.sleep(10)
-            else:
-                print('VOLTHA core is already running. Skipped start')
-
-            ofagent_start_cmd = "cd {} && sh -c '. ./env.sh && \
-            nohup python ofagent/main.py -v --consul=localhost:8500 \
-            --fluentd={}:24224 --controller={}:6653 --grpc-endpoint=localhost:50555 \
-            >/tmp/ofagent.log 2>&1 &'".format(self.voltha_loc,
-                                              self.service_map['fluentd']['ip'],
-                                              self.controller)
-            if not self.service_running('python ofagent/main.py'):
-                ofagent_pki_dir = '/ofagent'
-                if os.access(pki_dir, os.F_OK):
-                    pki_xfer_cmd = 'mkdir -p {} && cp -rv {}/pki {}'.format(ofagent_pki_dir,
-                                                                            self.voltha_loc,
-                                                                            ofagent_pki_dir)
-                    os.system(pki_xfer_cmd)
-                ret = os.system(ofagent_start_cmd)
-                if ret != 0:
-                    raise Exception('VOLTHA ofagent not started. Failed with return code %d' %ret)
-                time.sleep(10)
-            else:
-                print('VOLTHA ofagent is already running. Skipped start')
-
-        ponsim_start_cmd = "cd {} && sh -c '. ./env.sh && \
-        nohup python ponsim/main.py -o {} -v >/tmp/ponsim.log 2>&1 &'".format(self.voltha_loc, self.num_onus)
-        if not self.service_running('python ponsim/main.py'):
-            ret = os.system(ponsim_start_cmd)
-            if ret != 0:
-                raise Exception('PONSIM not started. Failed with return code %d' %ret)
-            time.sleep(3)
-        else:
-            print('PONSIM already running. Skipped start')
-
-    def service_running(self, pattern):
-        st, _ = getstatusoutput('pgrep -f "{}"'.format(pattern))
-        return True if st == 0 else False
-
-    def kill_service(self, pattern):
-        st, output = getstatusoutput('pgrep -f "{}"'.format(pattern))
-        if st == 0 and output:
-            pids = output.strip().splitlines()
-            for pid in pids:
-                try:
-                    os.kill(int(pid), signal.SIGKILL)
-                except:
-                    pass
-
-    def stop(self):
-        if self.CONTAINER_MODE is False:
-            self.kill_service('python voltha/main.py')
-            self.kill_service('python ofagent/main.py')
-            self.kill_service('python ponsim/main.py')
-        self.kill_service('python chameleon/main.py')
-        service_stop_cmd = 'DOCKER_HOST_IP={} docker-compose -p {} -f {} down'.format(self.DOCKER_HOST_IP,
-                                                                                      self.PROJECT,
-                                                                                      self.compose_file_loc)
-        os.system(service_stop_cmd)
-
-    @classmethod
-    def get_ip(cls, service):
-        if service in cls.service_map:
-            return cls.service_map[service]['ip']
-        if service == cls.REST_SERVICE:
-            return os.getenv('VOLTHA_HOST', None)
-        return None
-
-    @classmethod
-    def get_network(cls, service):
-        if service in cls.service_map:
-            return cls.service_map[service]['network']
-        return None
-
-class VolthaCtrl(object):
-    UPLINK_VLAN_START = 333
-    UPLINK_VLAN_MAP = { 'of:0000000000000001' : '222' }
-    REST_PORT = 8882
-    HOST = '172.17.0.1'
-    ONOS_APPS = ('org.onosproject.hostprovider', 'org.onosproject.dhcp', 'org.onosproject.dhcp-relay', 'org.ciena.cordigmp')
-    ADMIN_STATE = 'admin_state'
-    OPER_STATUS = 'oper_status'
-    CONNECT_STATUS = 'connect_status'
-
-    def __init__(self, host = HOST, rest_port = REST_PORT, uplink_vlan_map = UPLINK_VLAN_MAP,
-                 uplink_vlan_start = UPLINK_VLAN_START):
-        self.host = host
-        self.rest_port = rest_port
-        self.rest_url = 'http://{}:{}/api/v1/local'.format(host, rest_port)
-        if rest_port == 8882:
-            self.rest_url = 'http://{}:{}/api/v1'.format(host, rest_port)
-            #self.ADMIN_STATE = 'adminState'
-            #self.OPER_STATUS = 'operStatus'
-            #self.CONNECT_STATUS = 'connectStatus'
-        self.uplink_vlan_map = uplink_vlan_map
-        VolthaCtrl.UPLINK_VLAN_START = uplink_vlan_start
-        self.switches = []
-        self.switch_map = {}
-
-    def config(self, fake = False, driver_configured = False):
-        devices = OnosCtrl.get_devices()
-        if not devices:
-            return self.switch_map
-        voltha_devices = filter(lambda d: not d['mfr'].startswith('Nicira'), devices)
-        self.switches = voltha_devices
-        device_config = { 'devices' : { } }
-        device_id = None
-        for device in voltha_devices:
-            device_id = device['id']
-            serial = device['serial']
-            ports = OnosCtrl.get_ports_device(device_id)
-            nni_ports = filter(lambda p: p['isEnabled'] and 'annotations' in p and p['annotations']['portName'].startswith('nni'), ports)
-            uni_ports = filter(lambda p: p['isEnabled'] and 'annotations' in p and p['annotations']['portName'].startswith('uni'), ports)
-            if device_id not in self.uplink_vlan_map:
-                uplink_vlan = VolthaCtrl.UPLINK_VLAN_START
-                VolthaCtrl.UPLINK_VLAN_START += 1
-                self.uplink_vlan_map[device_id] = uplink_vlan
-                log.info('Voltha device %s not in map. Using uplink vlan %d' %(device_id, uplink_vlan))
-            else:
-                uplink_vlan = self.uplink_vlan_map[device_id]
-            if not nni_ports:
-                log.info('Voltha device %s has no NNI ports' %device_id)
-                if fake is True:
-                    log.info('Faking NNI port 0')
-                    nni_ports = [ {'port': '0'} ]
-                else:
-                    log.info('Skip configuring device %s' %device_id)
-                    continue
-            if not uni_ports:
-                log.info('Voltha device %s has no UNI ports' %device_id)
-                if fake is True:
-                    log.info('Faking UNI port 252')
-                    uni_ports = [ {'port': '252'} ]
-                else:
-                    log.info('Skip configuring device %s' %device_id)
-                    continue
-            onu_ports = map(lambda uni: uni['port'], uni_ports)
-            onu_names = map(lambda uni: uni['annotations']['portName'], uni_ports)
-            onu_macs =  map(lambda uni: uni['annotations']['portMac'], uni_ports)
-            self.switch_map[device_id] = dict(uplink_vlan = uplink_vlan,
-                                              serial = serial,
-                                              ports = onu_ports,
-                                              names = onu_names,
-                                              macs = onu_macs)
-            device_config['devices'][device_id] = {}
-            device_config['devices'][device_id]['basic'] = dict(driver='voltha')
-            device_config['devices'][device_id]['accessDevice'] = dict(uplink=nni_ports[0]['port'],
-                                                                       vlan = uplink_vlan,
-                                                                       defaultVlan=str(onu_ports[0])
-                                                                       )
-        if device_id and driver_configured is False:
-            #toggle drivers/openflow base before reconfiguring the driver and olt config data
-            OnosCtrl('org.onosproject.drivers').deactivate()
-            OnosCtrl('org.onosproject.openflow-base').deactivate()
-            OnosCtrl.config(device_config)
-            time.sleep(10)
-            OnosCtrl('org.onosproject.drivers').activate()
-            OnosCtrl('org.onosproject.openflow-base').activate()
-            time.sleep(10)
-            log.info('Reactivating CORD and ONOS apps')
-            Onos.activate_cord_apps(deactivate = True)
-            Onos.activate_apps(self.ONOS_APPS, deactivate = True)
-
-        return self.switch_map
-
-    def get_devices(self):
-        url = '{}/devices'.format(self.rest_url)
-        resp = requests.get(url)
-        if resp.ok is not True or resp.status_code != 200:
-            return None
-        return resp.json()
-
-    def enable_device(self, olt_type, olt_mac = None, address = None):
-        url = '{}/devices'.format(self.rest_url)
-        if olt_mac is None and address is None:
-            log.error('Either olt mac or address needs to be specified')
-            return None, False
-        if olt_mac is not None:
-            device_config = { 'type' : olt_type, 'mac_address' : olt_mac }
-        else:
-            if len(address.split(':')) > 1:
-                device_config = { 'type' : olt_type, 'host_and_port' : address }
-            else:
-                device_config = { 'type' : olt_type, 'ipv4_address' : address }
-        #pre-provision
-        if olt_mac is not None:
-            log.info('Pre-provisioning %s with mac %s' %(olt_type, olt_mac))
-        else:
-            log.info('Pre-provisioning %s with address %s' %(olt_type, address))
-        resp = requests.post(url, data = json.dumps(device_config))
-        if resp.ok is not True or resp.status_code != 200:
-            return None, False
-        device_id = resp.json()['id']
-        log.info('Enabling device %s' %(device_id))
-        enable_url = '{}/{}/enable'.format(url, device_id)
-        resp = requests.post(enable_url)
-        if resp.ok is not True or resp.status_code != 200:
-            return None, False
-        #get operational status
-        time.sleep(10)
-        log.info('Checking operational status for device %s' %(device_id))
-        resp = requests.get('{}/{}'.format(url, device_id))
-        if resp.ok is not True or resp.status_code != 200:
-            return device_id, False
-        device_info = resp.json()
-        if device_info[self.OPER_STATUS] != 'ACTIVE' or \
-           device_info[self.ADMIN_STATE] != 'ENABLED' or \
-           device_info[self.CONNECT_STATUS] != 'REACHABLE':
-            return device_id, False
-
-        return device_id, True
-
-    def disable_device(self, device_id, delete = True):
-        log.info('Disabling device %s' %(device_id))
-        disable_url = '{}/devices/{}/disable'.format(self.rest_url, device_id)
-        resp = requests.post(disable_url)
-        if resp.ok is not True or resp.status_code != 200:
-            return False
-        if delete is True:
-            #rest for disable completion
-            time.sleep(10)
-            log.info('Deleting device %s' %(device_id))
-            delete_url = '{}/devices/{}/delete'.format(self.rest_url, device_id)
-            resp = requests.delete(delete_url)
-            if resp.status_code not in [204, 202, 200]:
-                return False
-        return True
-
-    def restart_device(self, device_id):
-        log.info('Restarting olt or onu device %s' %(device_id))
-        disable_url = '{}/devices/{}/restart'.format(self.rest_url, device_id)
-        resp = requests.post(disable_url)
-        if resp.ok is not True or resp.status_code != 200:
-            return False
-        return True
-
-    def pause_device(self, device_id):
-        log.info('Restarting olt or onu device %s' %(device_id))
-        disable_url = '{}/devices/{}/pause'.format(self.rest_url, device_id)
-        resp = requests.post(disable_url)
-        if resp.ok is not True or resp.status_code != 200:
-            return False
-        return True
-
-    def get_operational_status(self, device_id):
-        url = '{}/devices'.format(self.rest_url)
-        log.info('Checking operational status for device %s' %(device_id))
-        resp = requests.get('{}/{}'.format(url, device_id))
-        if resp.ok is not True or resp.status_code != 200:
-            return False
-        device_info = resp.json()
-        if device_info[self.OPER_STATUS] != 'ACTIVE' or \
-           device_info[self.ADMIN_STATE] != 'ENABLED' or \
-           device_info[self.CONNECT_STATUS] != 'REACHABLE':
-           return False
-        return True
-
-    def check_preprovision_status(self, device_id):
-        url = '{}/devices'.format(self.rest_url)
-        log.info('Check if device %s is in Preprovisioning state'%(device_id))
-        resp = requests.get('{}/{}'.format(url, device_id))
-        if resp.ok is not True or resp.status_code != 200:
-           return False
-        device_info = resp.json()
-        if device_info[self.ADMIN_STATE] == 'PREPROVISIONED':
-           return True
-        return False
-
-def get_olt_app():
-    our_path = os.path.dirname(os.path.realpath(__file__))
-    version = Onos.getVersion()
-    major = int(version.split('.')[0])
-    minor = int(version.split('.')[1])
-    olt_app_version = '1.2-SNAPSHOT'
-    if major > 1:
-        olt_app_version = '3.0-SNAPSHOT'
-    elif major == 1:
-        if minor >= 10:
-            olt_app_version = '3.0-SNAPSHOT'
-        elif minor <= 8:
-            olt_app_version = '1.1-SNAPSHOT'
-    olt_app_file = os.path.join(our_path, '..', 'apps/olt-app-{}.oar'.format(olt_app_version))
-    return olt_app_file
-
-def voltha_setup(host = '172.17.0.1', ponsim_host = VolthaService.PONSIM_HOST, olt_ip = None, rest_port = VolthaCtrl.REST_PORT,
-                 olt_type = 'ponsim_olt', olt_mac = '00:0c:e2:31:12:00',
-                 uplink_vlan_map = VolthaCtrl.UPLINK_VLAN_MAP,
-                 uplink_vlan_start = VolthaCtrl.UPLINK_VLAN_START,
-                 config_fake = False, olt_app = None, teardown = True):
-    devices = OnosCtrl.get_devices()
-    olt_devices = filter(lambda d: not d['mfr'].startswith('Nicira') and d['driver'] == 'voltha', devices)
-    voltha = VolthaCtrl(host, rest_port = rest_port,
-                        uplink_vlan_map = uplink_vlan_map,
-                        uplink_vlan_start = uplink_vlan_start)
-    voltha_devices = voltha.get_devices()
-    if voltha_devices:
-        voltha_device_ids = filter(lambda d: d[voltha.OPER_STATUS] == 'ACTIVE' and d[voltha.ADMIN_STATE] == 'ENABLED',
-                                   voltha_devices['items'])
-    else:
-        voltha_device_ids = []
-
-    driver_configured = len(olt_devices) > 0 and len(voltha_device_ids) > 0
-    if olt_type.startswith('ponsim'):
-        if driver_configured:
-            device_id, status = voltha_device_ids[0], True
-        else:
-            ponsim_address = '{}:50060'.format(ponsim_host)
-            log.info('Enabling ponsim olt')
-            device_id, status = voltha.enable_device(olt_type, address = ponsim_address)
-    else:
-        if driver_configured:
-            device_id, status = voltha_device_ids[0], True
-        else:
-            if olt_type.startswith('maple') or olt_ip:
-                if olt_ip:
-                    log.info('Enabling %s' %olt_type)
-                    device_id, status = voltha.enable_device(olt_type, address = olt_ip)
-                else:
-                    log.info('OLT IP needs to be specified for %s' %olt_type)
-            else:
-                log.info('Enabling OLT instance for %s with mac %s' %(olt_type, olt_mac))
-                device_id, status = voltha.enable_device(olt_type, olt_mac)
-
-    if device_id is None or status is False:
-        if device_id:
-            voltha.disable_device(device_id)
-        return None
-
-    switch_map = None
-    olt_installed = False
-    if olt_app is None:
-        olt_app = get_olt_app()
-    try:
-        time.sleep(5)
-        switch_map = voltha.config(fake = config_fake, driver_configured = driver_configured)
-        if switch_map is None:
-            voltha.disable_device(device_id)
-            return None
-        log.info('Installing OLT app %s' %olt_app)
-        OnosCtrl.install_app(olt_app)
-        olt_installed = True
-        time.sleep(5)
-        OnosCtrl.config_olt_component()
-        return voltha, device_id, switch_map, driver_configured
-    except:
-        voltha.disable_device(device_id)
-        time.sleep(10)
-        if olt_installed is True:
-            log.info('Uninstalling OLT app %s' %olt_app)
-            OnosCtrl.uninstall_app(olt_app)
-
-    return None
-
-def voltha_teardown(voltha_ctrl, device_id, switch_map, olt_app = None):
-    if voltha_ctrl:
-        voltha_ctrl.disable_device(device_id)
-    time.sleep(10)
-    if olt_app is None:
-        olt_app = get_olt_app()
-    log.info('Uninstalling OLT app %s' %olt_app)
-    OnosCtrl.uninstall_app(olt_app)
diff --git a/src/test/utils/Xos.py b/src/test/utils/Xos.py
deleted file mode 100644
index 991455c..0000000
--- a/src/test/utils/Xos.py
+++ /dev/null
@@ -1,110 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os,time
-from CordContainer import Container
-from CordTestUtils import log_test
-
-class XosBase(object):
-    workspace = '/tmp/xos_scratch_workspace'
-    image = 'xosproject/xos'
-    tag = 'latest'
-
-    @classmethod
-    def clone(cls, update = False):
-        fetch_cmd = 'mkdir -p {} && cd {} && \
-                     git clone http://gerrit.opencord.org/xos'.format(cls.workspace, cls.workspace)
-        fetch = True
-        if os.access(cls.workspace, os.F_OK):
-            fetch = update
-            if update is True:
-                os.system('rm -rf {}'.format(cls.workspace))
-        if fetch is True:
-            ##fetch the xos
-            os.system(fetch_cmd)
-
-    @classmethod
-    def build_images(cls):
-        images = ( ['xos', ('base', 'build',),],
-                   ['postgresql', ('build',),],
-                   ['synchronizer', ('build',),],
-                   ['onboarding_synchronizer', ('build',),],
-                   ['syndicate-ms', ('build',),],
-                  )
-
-        for cnt, targets in images:
-            for target in targets:
-                xos_dir = 'cd {}/xos/containers/{} && make {}'.format(cls.workspace, cnt, target)
-                os.system(xos_dir)
-
-class XosServiceProfile(XosBase):
-
-    def __init__(self, profile = 'cord-pod', update = False):
-        self.workspace = XosBase.workspace
-        self.profile = profile
-        self.service_dir = '{}/service-profile'.format(self.workspace)
-        self.profile_dir = '{}/{}'.format(self.service_dir, profile)
-        XosBase.clone(update = update)
-        self.__clone(update = update)
-
-    def __clone(self, update = False):
-        fetch_cmd = 'cd {} && git clone http://gerrit.opencord.org/service-profile'.format(self.workspace)
-        fetch = True
-        if os.access(self.service_dir, os.F_OK):
-            fetch = update
-            if update is True:
-                os.system('rm -rf {}'.format(self.service_dir))
-        if fetch:
-            os.system(fetch_cmd)
-
-    def __ssh_key_check(self):
-        id_rsa = '{}/.ssh/id_rsa'.format(os.getenv('HOME'))
-        if not os.access(id_rsa, os.F_OK):
-            return False
-        return True
-
-    def __ssh_copy_keys(self, dest):
-        cmd = 'cp -v {}/.ssh/id_rsa* {}'.format(os.getenv('HOME'), dest)
-        return os.system(cmd)
-
-    def build_images(self, force = False):
-        if force is True or not Container.image_exists('{}:{}'.format(XosBase.image, XosBase.tag)):
-            XosBase.build_images()
-
-    def start_services(self):
-        if not self.__ssh_key_check():
-            log_test.info('SSH keys need to be generated before building XOS service containers')
-            log_test.info('Use the following commands to generate ssh keys')
-            log_test.info('ssh-keygen -t rsa -q -N ""')
-            log_test.info('ssh-copy-id -i $HOME/.ssh/id_rsa ubuntu@localhost')
-            return False
-        if not os.access(self.profile_dir, os.F_OK):
-            log_test.error('Profile directory %s does not exist' %self.profile_dir)
-            return False
-        self.build_images()
-        ##copy the keys to the profile dir
-        self.__ssh_copy_keys(self.profile_dir)
-        service_cmd = 'cd {} && make dirs download_services bootstrap onboarding'.format(self.profile_dir)
-        return os.system(service_cmd)
-
-    def stop_services(self, rm = False):
-        if os.access(self.profile_dir, os.F_OK):
-            cmds = ['cd {}'.format(self.profile_dir), 'make stop']
-            if rm is True:
-                cmds += ['make rm']
-            cmd = ' && '.join(cmds)
-            return os.system(cmd) == 0
-        return False
diff --git a/src/test/utils/__init__.py b/src/test/utils/__init__.py
deleted file mode 100644
index 25285f9..0000000
--- a/src/test/utils/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(utils_dir)
-__path__.append(cli_dir)
diff --git a/src/test/utils/threadPool.py b/src/test/utils/threadPool.py
deleted file mode 100644
index ec7e81d..0000000
--- a/src/test/utils/threadPool.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import threading
-import Queue
-
-class PoolThread(threading.Thread):
-
-    def __init__(self, requests_queue, wait_timeout, daemon, **kwds):
-        threading.Thread.__init__(self, **kwds)
-        self.daemon = daemon
-        self._queue = requests_queue
-        self._wait_timeout = wait_timeout
-        self._finished = threading.Event()
-        self.start()
-
-    def run(self):
-        while True:
-            if(self._finished.isSet()):
-                break
-
-            try:
-                work = self._queue.get(block=True, timeout=self._wait_timeout)
-            except Queue.Empty:
-                continue
-            else:
-                try:
-                    work.__call__()
-                finally:
-                    self._queue.task_done()
-
-
-
-class ThreadPool:
-
-    def __init__(self, pool_size, daemon=False, queue_size=0, wait_timeout=5):
-        """Set up the thread pool and create pool_size threads
-        """
-        self._queue = Queue.Queue(queue_size)
-        self._daemon = daemon
-        self._threads = []
-        self._pool_size = pool_size
-        self._wait_timeout = wait_timeout
-        self.createThreads()
-
-
-    def addTask(self, callableObject):
-        if (callable(callableObject)):
-            self._queue.put(callableObject, block=True)
-
-    def cleanUpThreads(self):
-        self._queue.join()
-
-        for t in self._threads:
-            t._finished.set()
-
-
-    def createThreads(self):
-        for i in range(self._pool_size):
-            self._threads.append(PoolThread(self._queue, self._wait_timeout, self._daemon))
-
-
-class CallObject:
-    def __init__(self, v = 0): 
-        self.v = v
-    def callCb(self): 
-        print 'Inside callback for %d' %self.v
-
-if __name__ == '__main__':
-    import multiprocessing
-    callList = []
-    cpu_count = multiprocessing.cpu_count()
-    for i in xrange(cpu_count * 2):
-        callList.append(CallObject(i))
-    tp = ThreadPool(cpu_count * 2, queue_size=1, wait_timeout=1)
-    for i in range(40):
-        callObject = callList[i% (cpu_count*2)]
-        f = callObject.callCb
-        tp.addTask(f)
-
-    tp.cleanUpThreads()
-
-
diff --git a/src/test/utils/tls_cert.py b/src/test/utils/tls_cert.py
deleted file mode 100644
index ffdf2cc..0000000
--- a/src/test/utils/tls_cert.py
+++ /dev/null
@@ -1,1076 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## This file is part of Scapy
-## See http://www.secdev.org/projects/scapy for more informations
-## Copyright (C) Arnaud Ebalard <arno@natisbad.org>
-## This program is published under a GPLv2 license
-
-"""
-Cryptographic certificates.
-"""
-
-import os, sys, math, struct, random
-from scapy.utils import strxor
-from scapy_ssl_tls.ssl_tls_crypto import x509_extract_pubkey_from_der
-try:
-    HAS_HASHLIB=True
-    import hashlib
-except:
-    HAS_HASHLIB=False
-
-from Crypto.PublicKey import *
-from Crypto.Cipher import *
-from Crypto.Hash import *
-from Crypto.Util import number
-
-# Maximum allowed size in bytes for a certificate file, to avoid
-# loading huge file when importing a cert
-MAX_KEY_SIZE=50*1024
-
-#####################################################################
-# Some helpers
-#####################################################################
-
-def warning(m):
-    print "WARNING: %s" % m
-
-def randstring(l):
-    """
-    Returns a random string of length l (l >= 0)
-    """
-    tmp = map(lambda x: struct.pack("B", random.randrange(0, 256, 1)), [""]*l)
-    return "".join(tmp)
-
-def zerofree_randstring(l):
-    """
-    Returns a random string of length l (l >= 0) without zero in it.
-    """
-    tmp = map(lambda x: struct.pack("B", random.randrange(1, 256, 1)), [""]*l)
-    return "".join(tmp)
-
-def strand(s1, s2):
-    """
-    Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
-    must be of same length.
-    """
-    return "".join(map(lambda x,y:chr(ord(x)&ord(y)), s1, s2))
-
-# OS2IP function defined in RFC 3447 for octet string to integer conversion
-def pkcs_os2ip(x):
-    """
-    Accepts a byte string as input parameter and return the associated long
-    value:
-
-    Input : x        octet string to be converted
-
-    Output: x        corresponding nonnegative integer
-
-    Reverse function is pkcs_i2osp()
-    """
-    return number.bytes_to_long(x) 
-
-# IP2OS function defined in RFC 3447 for octet string to integer conversion
-def pkcs_i2osp(x,xLen):
-    """
-    Converts a long (the first parameter) to the associated byte string
-    representation of length l (second parameter). Basically, the length
-    parameters allow the function to perform the associated padding.
-
-    Input : x        nonnegative integer to be converted
-            xLen     intended length of the resulting octet string
-
-    Output: x        corresponding nonnegative integer
-
-    Reverse function is pkcs_os2ip().
-    """
-    z = number.long_to_bytes(x)
-    padlen = max(0, xLen-len(z))
-    return '\x00'*padlen + z
-
-# for every hash function a tuple is provided, giving access to
-# - hash output length in byte
-# - associated hash function that take data to be hashed as parameter
-#   XXX I do not provide update() at the moment.
-# - DER encoding of the leading bits of digestInfo (the hash value
-#   will be concatenated to create the complete digestInfo).
-#
-# Notes:
-# - MD4 asn.1 value should be verified. Also, as stated in
-#   PKCS#1 v2.1, MD4 should not be used.
-# - hashlib is available from http://code.krypto.org/python/hashlib/
-# - 'tls' one is the concatenation of both md5 and sha1 hashes used
-#   by SSL/TLS when signing/verifying things
-_hashFuncParams = {
-    "md2"    : (16, 
-                lambda x: MD2.new(x).digest(),
-                '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x02\x05\x00\x04\x10'),
-    "md4"    : (16,
-                lambda x: MD4.new(x).digest(), 
-                '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04\x05\x00\x04\x10'), # is that right ?
-    "md5"    : (16, 
-                lambda x: MD5.new(x).digest(), 
-                '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
-    "sha1"   : (20,
-                lambda x: SHA.new(x).digest(), 
-                '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
-    "tls"    : (36,
-                lambda x: MD5.new(x).digest() + SHA.new(x).digest(),
-                '') }
-
-if HAS_HASHLIB:
-    _hashFuncParams["sha224"] = (28,
-                lambda x: hashlib.sha224(x).digest(),
-                '\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c')
-    _hashFuncParams["sha256"] = (32,
-                lambda x: hashlib.sha256(x).digest(),
-                '\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')
-    _hashFuncParams["sha384"] = (48,
-                lambda x: hashlib.sha384(x).digest(),
-               '\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30')
-    _hashFuncParams["sha512"] = (64,
-               lambda x: hashlib.sha512(x).digest(),
-               '\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40')
-else:
-    warning("hashlib support is not available. Consider installing it")
-    warning("if you need sha224, sha256, sha384 and sha512 algs.")
-
-def pkcs_mgf1(mgfSeed, maskLen, h):
-    """
-    Implements generic MGF1 Mask Generation function as described in
-    Appendix B.2.1 of RFC 3447. The hash function is passed by name.
-    valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
-    'sha384' and 'sha512'. Returns None on error.
-
-    Input:
-       mgfSeed: seed from which mask is generated, an octet string
-       maskLen: intended length in octets of the mask, at most 2^32 * hLen
-                hLen (see below)
-       h      : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-                'sha256', 'sha384'). hLen denotes the length in octets of
-                the hash function output.
-
-    Output:
-       an octet string of length maskLen
-    """
-
-    # steps are those of Appendix B.2.1
-    if not _hashFuncParams.has_key(h):
-        warning("pkcs_mgf1: invalid hash (%s) provided")
-        return None
-    hLen = _hashFuncParams[h][0]
-    hFunc = _hashFuncParams[h][1]
-    if maskLen > 2**32 * hLen:                               # 1)
-        warning("pkcs_mgf1: maskLen > 2**32 * hLen")
-        return None
-    T = ""                                                   # 2)
-    maxCounter = math.ceil(float(maskLen) / float(hLen))     # 3)
-    counter = 0
-    while counter < maxCounter:
-        C = pkcs_i2osp(counter, 4)
-        T += hFunc(mgfSeed + C)
-        counter += 1
-    return T[:maskLen]
-
-
-def pkcs_emsa_pss_encode(M, emBits, h, mgf, sLen):
-    """
-    Implements EMSA-PSS-ENCODE() function described in Sect. 9.1.1 of RFC 3447
-
-    Input:
-       M     : message to be encoded, an octet string
-       emBits: maximal bit length of the integer resulting of pkcs_os2ip(EM),
-               where EM is the encoded message, output of the function.
-       h     : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-               'sha256', 'sha384'). hLen denotes the length in octets of
-               the hash function output.
-       mgf   : the mask generation function f : seed, maskLen -> mask
-       sLen  : intended length in octets of the salt
-
-    Output:
-       encoded message, an octet string of length emLen = ceil(emBits/8)
-
-    On error, None is returned.
-    """
-
-    # 1) is not done
-    hLen = _hashFuncParams[h][0]                             # 2)
-    hFunc = _hashFuncParams[h][1]
-    mHash = hFunc(M)
-    emLen = int(math.ceil(emBits/8.))
-    if emLen < hLen + sLen + 2:                              # 3)
-        warning("encoding error (emLen < hLen + sLen + 2)")
-        return None
-    salt = randstring(sLen)                                  # 4)
-    MPrime = '\x00'*8 + mHash + salt                         # 5)
-    H = hFunc(MPrime)                                        # 6)
-    PS = '\x00'*(emLen - sLen - hLen - 2)                    # 7)
-    DB = PS + '\x01' + salt                                  # 8)
-    dbMask = mgf(H, emLen - hLen - 1)                        # 9)
-    maskedDB = strxor(DB, dbMask)                            # 10)
-    l = (8*emLen - emBits)/8                                 # 11)
-    rem = 8*emLen - emBits - 8*l # additionnal bits
-    andMask = l*'\x00'
-    if rem:
-        j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
-        andMask += j
-        l += 1
-    maskedDB = strand(maskedDB[:l], andMask) + maskedDB[l:]
-    EM = maskedDB + H + '\xbc'                               # 12)
-    return EM                                                # 13)
-
-
-def pkcs_emsa_pss_verify(M, EM, emBits, h, mgf, sLen):
-    """
-    Implements EMSA-PSS-VERIFY() function described in Sect. 9.1.2 of RFC 3447
-
-    Input:
-       M     : message to be encoded, an octet string
-       EM    : encoded message, an octet string of length emLen = ceil(emBits/8)
-       emBits: maximal bit length of the integer resulting of pkcs_os2ip(EM)
-       h     : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-               'sha256', 'sha384'). hLen denotes the length in octets of
-               the hash function output.
-       mgf   : the mask generation function f : seed, maskLen -> mask
-       sLen  : intended length in octets of the salt
-
-    Output:
-       True if the verification is ok, False otherwise.
-    """
-
-    # 1) is not done
-    hLen = _hashFuncParams[h][0]                             # 2)
-    hFunc = _hashFuncParams[h][1]
-    mHash = hFunc(M)
-    emLen = int(math.ceil(emBits/8.))                        # 3)
-    if emLen < hLen + sLen + 2:
-        return False
-    if EM[-1] != '\xbc':                                     # 4)
-        return False
-    l = emLen - hLen - 1                                     # 5)
-    maskedDB = EM[:l]
-    H = EM[l:l+hLen]
-    l = (8*emLen - emBits)/8                                 # 6)
-    rem = 8*emLen - emBits - 8*l # additionnal bits
-    andMask = l*'\xff'
-    if rem:
-        val = reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem)))
-        j = chr(~val & 0xff)
-        andMask += j
-        l += 1
-    if strand(maskedDB[:l], andMask) != '\x00'*l:
-        return False
-    dbMask = mgf(H, emLen - hLen - 1)                        # 7)
-    DB = strxor(maskedDB, dbMask)                            # 8)
-    l = (8*emLen - emBits)/8                                 # 9)
-    rem = 8*emLen - emBits - 8*l # additionnal bits
-    andMask = l*'\x00'
-    if rem:
-        j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
-        andMask += j
-        l += 1
-    DB = strand(DB[:l], andMask) + DB[l:]
-    l = emLen - hLen - sLen - 1                              # 10)
-    if DB[:l] != '\x00'*(l-1) + '\x01':
-        return False
-    salt = DB[-sLen:]                                        # 11)
-    MPrime = '\x00'*8 + mHash + salt                         # 12)
-    HPrime = hFunc(MPrime)                                   # 13)
-    return H == HPrime                                       # 14)
-
-
-def pkcs_emsa_pkcs1_v1_5_encode(M, emLen, h): # section 9.2 of RFC 3447
-    """
-    Implements EMSA-PKCS1-V1_5-ENCODE() function described in Sect.
-    9.2 of RFC 3447.
-
-    Input:
-       M    : message to be encode, an octet string
-       emLen: intended length in octets of the encoded message, at least
-              tLen + 11, where tLen is the octet length of the DER encoding
-              T of a certain value computed during the encoding operation.
-       h    : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-              'sha256', 'sha384'). hLen denotes the length in octets of
-              the hash function output.
-
-    Output:
-       encoded message, an octet string of length emLen
-
-    On error, None is returned.
-    """
-    hLen = _hashFuncParams[h][0]                             # 1)
-    hFunc = _hashFuncParams[h][1]
-    H = hFunc(M)
-    hLeadingDigestInfo = _hashFuncParams[h][2]               # 2)
-    T = hLeadingDigestInfo + H
-    tLen = len(T)
-    if emLen < tLen + 11:                                    # 3)
-        warning("pkcs_emsa_pkcs1_v1_5_encode: intended encoded message length too short")
-        return None
-    PS = '\xff'*(emLen - tLen - 3)                           # 4)
-    EM = '\x00' + '\x01' + PS + '\x00' + T                   # 5)
-    return EM                                                # 6)
-
-
-#####################################################################
-# Public Key Cryptography related stuff
-#####################################################################
-
-class _EncryptAndVerify:
-    ### Below are encryption methods
-
-    def _rsaep(self, m):
-        """
-        Internal method providing raw RSA encryption, i.e. simple modular
-        exponentiation of the given message representative 'm', a long
-        between 0 and n-1.
-
-        This is the encryption primitive RSAEP described in PKCS#1 v2.1,
-        i.e. RFC 3447 Sect. 5.1.1.
-
-        Input:
-           m: message representative, a long between 0 and n-1, where
-              n is the key modulus.
-
-        Output:
-           ciphertext representative, a long between 0 and n-1
-
-        Not intended to be used directly. Please, see encrypt() method.
-        """
-
-        n = self.modulus
-        if type(m) is int:
-            m = long(m)
-        if type(m) is not long or m > n-1:
-            warning("Key._rsaep() expects a long between 0 and n-1")
-            return None
-
-        return self.key.encrypt(m, "")[0]
-
-
-    def _rsaes_pkcs1_v1_5_encrypt(self, M):
-        """
-        Implements RSAES-PKCS1-V1_5-ENCRYPT() function described in section
-        7.2.1 of RFC 3447.
-
-        Input:
-           M: message to be encrypted, an octet string of length mLen, where
-              mLen <= k - 11 (k denotes the length in octets of the key modulus)
-
-        Output:
-           ciphertext, an octet string of length k
-
-        On error, None is returned.
-        """
-
-        # 1) Length checking
-        mLen = len(M)
-        k = self.modulusLen / 8
-        if mLen > k - 11:
-            warning("Key._rsaes_pkcs1_v1_5_encrypt(): message too "
-                    "long (%d > %d - 11)" % (mLen, k))
-            return None
-
-        # 2) EME-PKCS1-v1_5 encoding
-        PS = zerofree_randstring(k - mLen - 3)      # 2.a)
-        EM = '\x00' + '\x02' + PS + '\x00' + M      # 2.b)
-
-        # 3) RSA encryption
-        m = pkcs_os2ip(EM)                          # 3.a)
-        c = self._rsaep(m)                          # 3.b)
-        C = pkcs_i2osp(c, k)                        # 3.c)
-
-        return C                                    # 4)
-
-
-    def _rsaes_oaep_encrypt(self, M, h=None, mgf=None, L=None):
-        """
-        Internal method providing RSAES-OAEP-ENCRYPT as defined in Sect.
-        7.1.1 of RFC 3447. Not intended to be used directly. Please, see
-        encrypt() method for type "OAEP".
-
-
-        Input:
-           M  : message to be encrypted, an octet string of length mLen
-                where mLen <= k - 2*hLen - 2 (k denotes the length in octets
-                of the RSA modulus and hLen the length in octets of the hash
-                function output)
-           h  : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-                'sha256', 'sha384'). hLen denotes the length in octets of
-                the hash function output. 'sha1' is used by default if not
-                provided.
-           mgf: the mask generation function f : seed, maskLen -> mask
-           L  : optional label to be associated with the message; the default
-                value for L, if not provided is the empty string
-
-        Output:
-           ciphertext, an octet string of length k
-
-        On error, None is returned.
-        """
-        # The steps below are the one described in Sect. 7.1.1 of RFC 3447.
-        # 1) Length Checking
-                                                    # 1.a) is not done
-        mLen = len(M)
-        if h is None:
-            h = "sha1"
-        if not _hashFuncParams.has_key(h):
-            warning("Key._rsaes_oaep_encrypt(): unknown hash function %s.", h)
-            return None
-        hLen = _hashFuncParams[h][0]
-        hFun = _hashFuncParams[h][1]
-        k = self.modulusLen / 8
-        if mLen > k - 2*hLen - 2:                   # 1.b)
-            warning("Key._rsaes_oaep_encrypt(): message too long.")
-            return None
-
-        # 2) EME-OAEP encoding
-        if L is None:                               # 2.a)
-            L = ""
-        lHash = hFun(L)
-        PS = '\x00'*(k - mLen - 2*hLen - 2)         # 2.b)
-        DB = lHash + PS + '\x01' + M                # 2.c)
-        seed = randstring(hLen)                     # 2.d)
-        if mgf is None:                             # 2.e)
-            mgf = lambda x,y: pkcs_mgf1(x,y,h)
-        dbMask = mgf(seed, k - hLen - 1)
-        maskedDB = strxor(DB, dbMask)               # 2.f)
-        seedMask = mgf(maskedDB, hLen)              # 2.g)
-        maskedSeed = strxor(seed, seedMask)         # 2.h)
-        EM = '\x00' + maskedSeed + maskedDB         # 2.i)
-
-        # 3) RSA Encryption
-        m = pkcs_os2ip(EM)                          # 3.a)
-        c = self._rsaep(m)                          # 3.b)
-        C = pkcs_i2osp(c, k)                        # 3.c)
-
-        return C                                    # 4)
-
-
-    def encrypt(self, m, t=None, h=None, mgf=None, L=None):
-        """
-        Encrypt message 'm' using 't' encryption scheme where 't' can be:
-
-        - None: the message 'm' is directly applied the RSAEP encryption
-                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447
-                Sect 5.1.1. Simply put, the message undergo a modular
-                exponentiation using the public key. Additionnal method
-                parameters are just ignored.
-
-        - 'pkcs': the message 'm' is applied RSAES-PKCS1-V1_5-ENCRYPT encryption
-                scheme as described in section 7.2.1 of RFC 3447. In that
-                context, other parameters ('h', 'mgf', 'l') are not used.
-
-        - 'oaep': the message 'm' is applied the RSAES-OAEP-ENCRYPT encryption
-                scheme, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
-                7.1.1. In that context,
-
-                o 'h' parameter provides the name of the hash method to use.
-                  Possible values are "md2", "md4", "md5", "sha1", "tls",
-                  "sha224", "sha256", "sha384" and "sha512". if none is provided,
-                  sha1 is used.
-
-                o 'mgf' is the mask generation function. By default, mgf
-                  is derived from the provided hash function using the
-                  generic MGF1 (see pkcs_mgf1() for details).
-
-                o 'L' is the optional label to be associated with the
-                  message. If not provided, the default value is used, i.e
-                  the empty string. No check is done on the input limitation
-                  of the hash function regarding the size of 'L' (for
-                  instance, 2^61 - 1 for SHA-1). You have been warned.
-        """
-
-        if t is None: # Raw encryption
-            m = pkcs_os2ip(m)
-            c = self._rsaep(m)
-            return pkcs_i2osp(c, self.modulusLen/8)
-
-        elif t == "pkcs":
-            return self._rsaes_pkcs1_v1_5_encrypt(m)
-
-        elif t == "oaep":
-            return self._rsaes_oaep_encrypt(m, h, mgf, L)
-
-        else:
-            warning("Key.encrypt(): Unknown encryption type (%s) provided" % t)
-            return None
-
-    ### Below are verification related methods
-
-    def _rsavp1(self, s):
-        """
-        Internal method providing raw RSA verification, i.e. simple modular
-        exponentiation of the given signature representative 'c', an integer
-        between 0 and n-1.
-
-        This is the signature verification primitive RSAVP1 described in
-        PKCS#1 v2.1, i.e. RFC 3447 Sect. 5.2.2.
-
-        Input:
-          s: signature representative, an integer between 0 and n-1,
-             where n is the key modulus.
-
-        Output:
-           message representative, an integer between 0 and n-1
-
-        Not intended to be used directly. Please, see verify() method.
-        """
-        return self._rsaep(s)
-
-    def _rsassa_pss_verify(self, M, S, h=None, mgf=None, sLen=None):
-        """
-        Implements RSASSA-PSS-VERIFY() function described in Sect 8.1.2
-        of RFC 3447
-
-        Input:
-           M: message whose signature is to be verified
-           S: signature to be verified, an octet string of length k, where k
-              is the length in octets of the RSA modulus n.
-
-        Output:
-           True is the signature is valid. False otherwise.
-        """
-
-        # Set default parameters if not provided
-        if h is None: # By default, sha1
-            h = "sha1"
-        if not _hashFuncParams.has_key(h):
-            warning("Key._rsassa_pss_verify(): unknown hash function "
-                    "provided (%s)" % h)
-            return False
-        if mgf is None: # use mgf1 with underlying hash function
-            mgf = lambda x,y: pkcs_mgf1(x, y, h)
-        if sLen is None: # use Hash output length (A.2.3 of RFC 3447)
-            hLen = _hashFuncParams[h][0]
-            sLen = hLen
-
-        # 1) Length checking
-        modBits = self.modulusLen
-        k = modBits / 8
-        if len(S) != k:
-            return False
-
-        # 2) RSA verification
-        s = pkcs_os2ip(S)                           # 2.a)
-        m = self._rsavp1(s)                         # 2.b)
-        emLen = math.ceil((modBits - 1) / 8.)       # 2.c)
-        EM = pkcs_i2osp(m, emLen)
-
-        # 3) EMSA-PSS verification
-        Result = pkcs_emsa_pss_verify(M, EM, modBits - 1, h, mgf, sLen)
-
-        return Result                               # 4)
-
-
-    def _rsassa_pkcs1_v1_5_verify(self, M, S, h):
-        """
-        Implements RSASSA-PKCS1-v1_5-VERIFY() function as described in
-        Sect. 8.2.2 of RFC 3447.
-
-        Input:
-           M: message whose signature is to be verified, an octet string
-           S: signature to be verified, an octet string of length k, where
-              k is the length in octets of the RSA modulus n
-           h: hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-                'sha256', 'sha384').
-
-        Output:
-           True if the signature is valid. False otherwise.
-        """
-
-        # 1) Length checking
-        k = self.modulusLen / 8
-        if len(S) != k:
-            warning("invalid signature (len(S) != k)")
-            return False
-
-        # 2) RSA verification
-        s = pkcs_os2ip(S)                           # 2.a)
-        m = self._rsavp1(s)                         # 2.b)
-        EM = pkcs_i2osp(m, k)                       # 2.c)
-
-        # 3) EMSA-PKCS1-v1_5 encoding
-        EMPrime = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
-        if EMPrime is None:
-            warning("Key._rsassa_pkcs1_v1_5_verify(): unable to encode.")
-            return False
-
-        # 4) Comparison
-        return EM == EMPrime
-
-
-    def verify(self, M, S, t=None, h=None, mgf=None, sLen=None):
-        """
-        Verify alleged signature 'S' is indeed the signature of message 'M' using
-        't' signature scheme where 't' can be:
-
-        - None: the alleged signature 'S' is directly applied the RSAVP1 signature
-                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
-                5.2.1. Simply put, the provided signature is applied a moular
-                exponentiation using the public key. Then, a comparison of the
-                result is done against 'M'. On match, True is returned.
-                Additionnal method parameters are just ignored.
-
-        - 'pkcs': the alleged signature 'S' and message 'M' are applied
-                RSASSA-PKCS1-v1_5-VERIFY signature verification scheme as
-                described in Sect. 8.2.2 of RFC 3447. In that context,
-                the hash function name is passed using 'h'. Possible values are
-                "md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
-                and "sha512". If none is provided, sha1 is used. Other additionnal
-                parameters are ignored.
-
-        - 'pss': the alleged signature 'S' and message 'M' are applied
-                RSASSA-PSS-VERIFY signature scheme as described in Sect. 8.1.2.
-                of RFC 3447. In that context,
-
-                o 'h' parameter provides the name of the hash method to use.
-                   Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
-                   "sha256", "sha384" and "sha512". if none is provided, sha1
-                   is used.
-
-                o 'mgf' is the mask generation function. By default, mgf
-                   is derived from the provided hash function using the
-                   generic MGF1 (see pkcs_mgf1() for details).
-
-                o 'sLen' is the length in octet of the salt. You can overload the
-                  default value (the octet length of the hash value for provided
-                  algorithm) by providing another one with that parameter.
-        """
-        if t is None: # RSAVP1
-            S = pkcs_os2ip(S)
-            n = self.modulus
-            if S > n-1:
-                warning("Signature to be verified is too long for key modulus")
-                return False
-            m = self._rsavp1(S)
-            if m is None:
-                return False
-            l = int(math.ceil(math.log(m, 2) / 8.)) # Hack
-            m = pkcs_i2osp(m, l)
-            return M == m
-
-        elif t == "pkcs": # RSASSA-PKCS1-v1_5-VERIFY
-            if h is None:
-                h = "sha1"
-            return self._rsassa_pkcs1_v1_5_verify(M, S, h)
-
-        elif t == "pss": # RSASSA-PSS-VERIFY
-            return self._rsassa_pss_verify(M, S, h, mgf, sLen)
-
-        else:
-            warning("Key.verify(): Unknown signature type (%s) provided" % t)
-            return None
-
-class _DecryptAndSignMethods:
-    ### Below are decryption related methods. Encryption ones are inherited
-    ### from PubKey
-
-    def _rsadp(self, c):
-        """
-        Internal method providing raw RSA decryption, i.e. simple modular
-        exponentiation of the given ciphertext representative 'c', a long
-        between 0 and n-1.
-
-        This is the decryption primitive RSADP described in PKCS#1 v2.1,
-        i.e. RFC 3447 Sect. 5.1.2.
-
-        Input:
-           c: ciphertest representative, a long between 0 and n-1, where
-              n is the key modulus.
-
-        Output:
-           ciphertext representative, a long between 0 and n-1
-
-        Not intended to be used directly. Please, see encrypt() method.
-        """
-
-        n = self.modulus
-        if type(c) is int:
-            c = long(c)
-        if type(c) is not long or c > n-1:
-            warning("Key._rsaep() expects a long between 0 and n-1")
-            return None
-
-        return self.key.decrypt(c)
-
-
-    def _rsaes_pkcs1_v1_5_decrypt(self, C):
-        """
-        Implements RSAES-PKCS1-V1_5-DECRYPT() function described in section
-        7.2.2 of RFC 3447.
-
-        Input:
-           C: ciphertext to be decrypted, an octet string of length k, where
-              k is the length in octets of the RSA modulus n.
-
-        Output:
-           an octet string of length k at most k - 11
-
-        on error, None is returned.
-        """
-
-        # 1) Length checking
-        cLen = len(C)
-        k = self.modulusLen / 8
-        if cLen != k or k < 11:
-            warning("Key._rsaes_pkcs1_v1_5_decrypt() decryption error "
-                    "(cLen != k or k < 11)")
-            return None
-
-        # 2) RSA decryption
-        c = pkcs_os2ip(C)                           # 2.a)
-        m = self._rsadp(c)                          # 2.b)
-        EM = pkcs_i2osp(m, k)                       # 2.c)
-
-        # 3) EME-PKCS1-v1_5 decoding
-
-        # I am aware of the note at the end of 7.2.2 regarding error
-        # conditions reporting but the one provided below are for _local_
-        # debugging purposes. --arno
-
-        if EM[0] != '\x00':
-            warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
-                    "(first byte is not 0x00)")
-            return None
-
-        if EM[1] != '\x02':
-            warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
-                    "(second byte is not 0x02)")
-            return None
-
-        tmp = EM[2:].split('\x00', 1)
-        if len(tmp) != 2:
-            warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
-                    "(no 0x00 to separate PS from M)")
-            return None
-
-        PS, M = tmp
-        if len(PS) < 8:
-            warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
-                    "(PS is less than 8 byte long)")
-            return None
-
-        return M                                    # 4)
-
-
-    def _rsaes_oaep_decrypt(self, C, h=None, mgf=None, L=None):
-        """
-        Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
-        7.1.2 of RFC 3447. Not intended to be used directly. Please, see
-        encrypt() method for type "OAEP".
-
-
-        Input:
-           C  : ciphertext to be decrypted, an octet string of length k, where
-                k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
-                and hLen the length in octets of the hash function output)
-           h  : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
-                'sha256', 'sha384'). 'sha1' is used if none is provided.
-           mgf: the mask generation function f : seed, maskLen -> mask
-           L  : optional label whose association with the message is to be
-                verified; the default value for L, if not provided is the empty
-                string.
-
-        Output:
-           message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
-
-        On error, None is returned.
-        """
-        # The steps below are the one described in Sect. 7.1.2 of RFC 3447.
-
-        # 1) Length Checking
-                                                    # 1.a) is not done
-        if h is None:
-            h = "sha1"
-        if not _hashFuncParams.has_key(h):
-            warning("Key._rsaes_oaep_decrypt(): unknown hash function %s.", h)
-            return None
-        hLen = _hashFuncParams[h][0]
-        hFun = _hashFuncParams[h][1]
-        k = self.modulusLen / 8
-        cLen = len(C)
-        if cLen != k:                               # 1.b)
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(cLen != k)")
-            return None
-        if k < 2*hLen + 2:
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(k < 2*hLen + 2)")
-            return None
-
-        # 2) RSA decryption
-        c = pkcs_os2ip(C)                           # 2.a)
-        m = self._rsadp(c)                          # 2.b)
-        EM = pkcs_i2osp(m, k)                       # 2.c)
-
-        # 3) EME-OAEP decoding
-        if L is None:                               # 3.a)
-            L = ""
-        lHash = hFun(L)
-        Y = EM[:1]                                  # 3.b)
-        if Y != '\x00':
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(Y is not zero)")
-            return None
-        maskedSeed = EM[1:1+hLen]
-        maskedDB = EM[1+hLen:]
-        if mgf is None:
-            mgf = lambda x,y: pkcs_mgf1(x, y, h)
-        seedMask = mgf(maskedDB, hLen)              # 3.c)
-        seed = strxor(maskedSeed, seedMask)         # 3.d)
-        dbMask = mgf(seed, k - hLen - 1)            # 3.e)
-        DB = strxor(maskedDB, dbMask)               # 3.f)
-
-        # I am aware of the note at the end of 7.1.2 regarding error
-        # conditions reporting but the one provided below are for _local_
-        # debugging purposes. --arno
-
-        lHashPrime = DB[:hLen]                      # 3.g)
-        tmp = DB[hLen:].split('\x01', 1)
-        if len(tmp) != 2:
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(0x01 separator not found)")
-            return None
-        PS, M = tmp
-        if PS != '\x00'*len(PS):
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(invalid padding string)")
-            return None
-        if lHash != lHashPrime:
-            warning("Key._rsaes_oaep_decrypt(): decryption error. "
-                    "(invalid hash)")
-            return None
-        return M                                    # 4)
-
-
-    def decrypt(self, C, t=None, h=None, mgf=None, L=None):
-        """
-        Decrypt ciphertext 'C' using 't' decryption scheme where 't' can be:
-
-        - None: the ciphertext 'C' is directly applied the RSADP decryption
-                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447
-                Sect 5.1.2. Simply, put the message undergo a modular
-                exponentiation using the private key. Additionnal method
-                parameters are just ignored.
-
-        - 'pkcs': the ciphertext 'C' is applied RSAES-PKCS1-V1_5-DECRYPT
-                decryption scheme as described in section 7.2.2 of RFC 3447.
-                In that context, other parameters ('h', 'mgf', 'l') are not
-                used.
-
-        - 'oaep': the ciphertext 'C' is applied the RSAES-OAEP-DECRYPT decryption
-                scheme, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
-                7.1.2. In that context,
-
-                o 'h' parameter provides the name of the hash method to use.
-                  Possible values are "md2", "md4", "md5", "sha1", "tls",
-                  "sha224", "sha256", "sha384" and "sha512". if none is provided,
-                  sha1 is used by default.
-
-                o 'mgf' is the mask generation function. By default, mgf
-                  is derived from the provided hash function using the
-                  generic MGF1 (see pkcs_mgf1() for details).
-
-                o 'L' is the optional label to be associated with the
-                  message. If not provided, the default value is used, i.e
-                  the empty string. No check is done on the input limitation
-                  of the hash function regarding the size of 'L' (for
-                  instance, 2^61 - 1 for SHA-1). You have been warned.
-        """
-        if t is None:
-            C = pkcs_os2ip(C)
-            c = self._rsadp(C)
-            l = int(math.ceil(math.log(c, 2) / 8.)) # Hack
-            return pkcs_i2osp(c, l)
-
-        elif t == "pkcs":
-            return self._rsaes_pkcs1_v1_5_decrypt(C)
-
-        elif t == "oaep":
-            return self._rsaes_oaep_decrypt(C, h, mgf, L)
-
-        else:
-            warning("Key.decrypt(): Unknown decryption type (%s) provided" % t)
-            return None
-
-    ### Below are signature related methods. Verification ones are inherited from
-    ### PubKey
-
-    def _rsasp1(self, m):
-        """
-        Internal method providing raw RSA signature, i.e. simple modular
-        exponentiation of the given message representative 'm', an integer
-        between 0 and n-1.
-
-        This is the signature primitive RSASP1 described in PKCS#1 v2.1,
-        i.e. RFC 3447 Sect. 5.2.1.
-
-        Input:
-           m: message representative, an integer between 0 and n-1, where
-              n is the key modulus.
-
-        Output:
-           signature representative, an integer between 0 and n-1
-
-        Not intended to be used directly. Please, see sign() method.
-        """
-        return self._rsadp(m)
-
-
-    def _rsassa_pss_sign(self, M, h=None, mgf=None, sLen=None):
-        """
-        Implements RSASSA-PSS-SIGN() function described in Sect. 8.1.1 of
-        RFC 3447.
-
-        Input:
-           M: message to be signed, an octet string
-
-        Output:
-           signature, an octet string of length k, where k is the length in
-           octets of the RSA modulus n.
-
-        On error, None is returned.
-        """
-
-        # Set default parameters if not provided
-        if h is None: # By default, sha1
-            h = "sha1"
-        if not _hashFuncParams.has_key(h):
-            warning("Key._rsassa_pss_sign(): unknown hash function "
-                    "provided (%s)" % h)
-            return None
-        if mgf is None: # use mgf1 with underlying hash function
-            mgf = lambda x,y: pkcs_mgf1(x, y, h)
-        if sLen is None: # use Hash output length (A.2.3 of RFC 3447)
-            hLen = _hashFuncParams[h][0]
-            sLen = hLen
-
-        # 1) EMSA-PSS encoding
-        modBits = self.modulusLen
-        k = modBits / 8
-        EM = pkcs_emsa_pss_encode(M, modBits - 1, h, mgf, sLen)
-        if EM is None:
-            warning("Key._rsassa_pss_sign(): unable to encode")
-            return None
-
-        # 2) RSA signature
-        m = pkcs_os2ip(EM)                          # 2.a)
-        s = self._rsasp1(m)                         # 2.b)
-        S = pkcs_i2osp(s, k)                        # 2.c)
-
-        return S                                    # 3)
-
-
-    def _rsassa_pkcs1_v1_5_sign(self, M, h):
-        """
-        Implements RSASSA-PKCS1-v1_5-SIGN() function as described in
-        Sect. 8.2.1 of RFC 3447.
-
-        Input:
-           M: message to be signed, an octet string
-           h: hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls'
-                'sha256', 'sha384').
-
-        Output:
-           the signature, an octet string.
-        """
-
-        # 1) EMSA-PKCS1-v1_5 encoding
-        k = self.modulusLen / 8
-        EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
-        if EM is None:
-            warning("Key._rsassa_pkcs1_v1_5_sign(): unable to encode")
-            return None
-
-        # 2) RSA signature
-        m = pkcs_os2ip(EM)                          # 2.a)
-        s = self._rsasp1(m)                         # 2.b)
-        S = pkcs_i2osp(s, k)                        # 2.c)
-
-        return S                                    # 3)
-
-
-    def sign(self, M, t=None, h=None, mgf=None, sLen=None):
-        """
-        Sign message 'M' using 't' signature scheme where 't' can be:
-
-        - None: the message 'M' is directly applied the RSASP1 signature
-                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
-                5.2.1. Simply put, the message undergo a modular exponentiation
-                using the private key. Additionnal method parameters are just
-                ignored.
-
-        - 'pkcs': the message 'M' is applied RSASSA-PKCS1-v1_5-SIGN signature
-                scheme as described in Sect. 8.2.1 of RFC 3447. In that context,
-                the hash function name is passed using 'h'. Possible values are
-                "md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
-                and "sha512". If none is provided, sha1 is used. Other additionnal
-                parameters are ignored.
-
-        - 'pss' : the message 'M' is applied RSASSA-PSS-SIGN signature scheme as
-                described in Sect. 8.1.1. of RFC 3447. In that context,
-
-                o 'h' parameter provides the name of the hash method to use.
-                   Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
-                   "sha256", "sha384" and "sha512". if none is provided, sha1
-                   is used.
-
-                o 'mgf' is the mask generation function. By default, mgf
-                   is derived from the provided hash function using the
-                   generic MGF1 (see pkcs_mgf1() for details).
-
-                o 'sLen' is the length in octet of the salt. You can overload the
-                  default value (the octet length of the hash value for provided
-                  algorithm) by providing another one with that parameter.
-        """
-
-        if t is None: # RSASP1
-            M = pkcs_os2ip(M)
-            n = self.modulus
-            if M > n-1:
-                warning("Message to be signed is too long for key modulus")
-                return None
-            s = self._rsasp1(M)
-            if s is None:
-                return None
-            return pkcs_i2osp(s, self.modulusLen/8)
-
-        elif t == "pkcs": # RSASSA-PKCS1-v1_5-SIGN
-            if h is None:
-                h = "sha1"
-            return self._rsassa_pkcs1_v1_5_sign(M, h)
-
-        elif t == "pss": # RSASSA-PSS-SIGN
-            return self._rsassa_pss_sign(M, h, mgf, sLen)
-
-        else:
-            warning("Key.sign(): Unknown signature type (%s) provided" % t)
-            return None
-
-class Key(_DecryptAndSignMethods, _EncryptAndVerify):
-
-    def __init__(self, pem_data):
-        self.key = RSA.importKey(pem_data)
-        self.modulus = self.key.key.n
-        self.modulusLen = self.key.key.size() + 1
-        self.privExp = self.key.key.d
-        self.pubExp = self.key.key.e
-        self.prime1 = self.key.key.p
-        self.prime2 = self.key.key.q
-        self.exponent1 = 0
-        self.exponent2 = 0
-        self.coefficient = self.key.key.u
diff --git a/src/test/voltha/__init__.py b/src/test/voltha/__init__.py
deleted file mode 100644
index 88eb0c5..0000000
--- a/src/test/voltha/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# 
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-# http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
diff --git a/src/test/voltha/volthaTest.json b/src/test/voltha/volthaTest.json
deleted file mode 100644
index 6594e73..0000000
--- a/src/test/voltha/volthaTest.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "OLT_TYPE" : "simulated_olt",
-    "OLT_MAC"  : "00:0c:e2:31:12:00",
-    "VOLTHA_HOST" : "172.17.0.1",
-    "VOLTHA_REST_PORT" : 8882,
-    "VOLTHA_UPLINK_VLAN_START" : 444
-}
diff --git a/src/test/voltha/volthaTest.py b/src/test/voltha/volthaTest.py
deleted file mode 100644
index 39de312..0000000
--- a/src/test/voltha/volthaTest.py
+++ /dev/null
@@ -1,6534 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import sys
-import unittest
-import time, monotonic
-import json
-import requests
-import threading
-from IGMP import *
-from random import randint
-from threading import Timer
-from threadPool import ThreadPool
-from nose.tools import *
-from nose.twistedtools import reactor, deferred
-from twisted.internet import defer
-from CordTestConfig import setup_module, teardown_module
-from CordTestUtils import get_mac, log_test
-from VolthaCtrl import VolthaCtrl, VolthaService, voltha_setup, voltha_teardown
-from CordTestUtils import log_test, get_controller
-from portmaps import g_subscriber_port_map
-from OltConfig import *
-from EapTLS import TLSAuthTest
-from Channels import Channels, IgmpChannel
-from Stats import Stats
-from DHCP import DHCPTest
-from OnosCtrl import OnosCtrl
-from CordLogger import CordLogger
-from scapy.all import *
-from scapy_ssl_tls.ssl_tls import *
-from scapy_ssl_tls.ssl_tls_crypto import *
-from CordTestServer import cord_test_onos_restart, cord_test_shell, cord_test_radius_restart
-from CordContainer import Onos
-
-
-class Voltha_olt_subscribers(Channels):
-
-      STATS_RX = 0
-      STATS_TX = 1
-      STATS_JOIN = 2
-      STATS_LEAVE = 3
-
-      def __init__(self, tx_port, rx_port, num_channels =1, channel_start = 0, src_list = None):
-          self.tx_port = tx_port
-          self.rx_port = rx_port
-          self.src_list = src_list
-          self.num_channels = num_channels
-          try:
-              self.tx_intf = tx_port
-              self.rx_intf = rx_port
-          except:
-              self.tx_intf = self.INTF_TX_DEFAULT
-              self.rx_intf = self.INTF_RX_DEFAULT
-#          num = 1
-#          channel_start = 0
-          mcast_cb = None
-          Channels.__init__(self, num_channels, channel_start = channel_start, src_list = src_list,
-                              iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
-
-          self.loginType  = 'wireless'
-          ##start streaming channels
-          self.join_map = {}
-          ##accumulated join recv stats
-          self.join_rx_stats = Stats()
-          self.recv_timeout = False
-
-
-      def channel_join_update(self, chan, join_time):
-            self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
-            self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
-
-      def channel_join(self, chan = 0, delay = 2, src_list = None, record_type = None):
-            '''Join a channel and create a send/recv stats map'''
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.delay = delay
-            chan, join_time = self.join(chan, src_list = src_list, record_type = record_type)
-            #chan, join_time = self.join(chan)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_join_next(self, delay = 2, src_list = None, leave_flag = True):
-            '''Joins the next channel leaving the last channel'''
-            if self.last_chan:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.join_next(src_list = src_list, leave_flag = leave_flag)
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_jump(self, delay = 2):
-            '''Jumps randomly to the next channel leaving the last channel'''
-            if self.last_chan is not None:
-                  if self.join_map.has_key(self.last_chan):
-                        del self.join_map[self.last_chan]
-            self.delay = delay
-            chan, join_time = self.jump()
-            self.channel_join_update(chan, join_time)
-            return chan
-
-      def channel_leave(self, chan = 0, force = False, src_list = None):
-            if self.join_map.has_key(chan):
-                  del self.join_map[chan]
-            self.leave(chan, force = force, src_list = src_list)
-
-      def channel_update(self, chan, stats_type, packets, t=0):
-            if type(chan) == type(0):
-                  chan_list = (chan,)
-            else:
-                  chan_list = chan
-            for c in chan_list:
-                  if self.join_map.has_key(c):
-                        self.join_map[c][stats_type].update(packets = packets, t = t)
-
-      def channel_receive(self, chan, cb = None, count = 1, timeout = 5, src_list = None):
-            log_test.info('Subscriber on port %s checking data traffic receiving from group %s, channel %d' %
-                     (self.rx_intf, self.gaddr(chan), chan))
-            r = self.recv(chan, cb = cb, count = count, timeout = timeout, src_list = src_list)
-            if len(r) == 0:
-                  log_test.info('Subscriber on port %s timed out' %( self.rx_intf))
-                  self.test_status = False
-            else:
-                  self.test_status = True
-                  pass
-#                  log_test.info('Subscriber on port %s received %d packets' %(self.rx_intf, len(r)))
-            if self.recv_timeout:
-                  ##Negative test case is disabled for now
-                  log_test.info('Subscriber on port %s not received %d packets' %(self.rx_intf, len(r)))
-                  assert_equal(len(r), 0)
-                  self.test_status = True
-            return self.test_status
-
-      def channel_not_receive(self, chan, cb = None, count = 1, timeout = 5, src_list = None):
-            log_test.info('Subscriber on port %s checking data traffic receiving from group %s, channel %d' %
-                     (self.rx_intf, self.gaddr(chan), chan))
-            r = self.not_recv(chan, cb = cb, count = count, timeout = timeout, src_list = src_list)
-            if len(r) == 0:
-                  log_test.info('Subscriber on port %s timed out' %( self.rx_intf))
-                  self.test_status = True
-            else:
-                  self.test_status = False
-                  pass
-#                  log_test.info('Subscriber on port %s received %d packets' %(self.rx_intf, len(r)))
-            if self.recv_timeout:
-                  ##Negative test case is disabled for now
-                  log_test.info('Subscriber on port %s not received %d packets' %(self.rx_intf, len(r)))
-                  assert_equal(len(r), 0)
-                  self.test_status = True
-            return self.test_status
-
-      def recv_channel_cb(self, pkt, src_list = None):
-
-            ##First verify that we have received the packet for the joined instance
-            log_test.info('Packet received for group %s, subscriber, port %s and from source ip %s showing full packet %s'%
-                     (pkt[IP].dst, self.rx_intf, pkt[IP].src, pkt.show))
-            if src_list is not None:
-               for i in src_list:
-                   if pkt[IP].src == src_list[i]:
-                      pass
-                   else:
-                      log_test.info('Packet received for group %s, subscriber, port %s and from source ip %s which is not expcted on that port'%
-                                                    (pkt[IP].dst, self.rx_intf, pkt[IP].src))
-
-                      self.recv_timeout = True
-
-            if self.recv_timeout:
-                  return
-            chan = self.caddr(pkt[IP].dst)
-            assert_equal(chan in self.join_map.keys(), True)
-            recv_time = monotonic.monotonic() * 1000000
-            join_time = self.join_map[chan][self.STATS_JOIN].start
-            delta = recv_time - join_time
-            self.join_rx_stats.update(packets=1, t = delta, usecs = True)
-            self.channel_update(chan, self.STATS_RX, 1, t = delta)
-            log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-
-class voltha_subscriber_pool:
-
-      def __init__(self, subscriber, test_cbs):
-            self.subscriber = subscriber
-            self.test_cbs = test_cbs
-
-      def pool_cb(self):
-            for cb in self.test_cbs:
-                  if cb:
-                        self.test_status = cb(self.subscriber, multiple_sub = True)
-                        if self.test_status is not True:
-                           ## This is chaining for other sub status has to check again
-                           self.test_status = True
-                           log_test.info('This service is failed and other services will not run for this subscriber')
-                           break
-            log_test.info('This Subscriber is tested for multiple service eligibility ')
-            self.test_status = True
-
-class voltha_exchange(unittest.TestCase):
-
-    OLT_TYPE = 'tibit_olt'
-    OLT_MAC = '00:0c:e2:31:12:00'
-    VOLTHA_HOST = VolthaService.DOCKER_HOST_IP
-    VOLTHA_PONSIM_HOST = VolthaService.PONSIM_HOST
-    VOLTHA_REST_PORT = VolthaCtrl.REST_PORT
-    VOLTHA_OLT_TYPE = 'ponsim_olt'
-    VOLTHA_OLT_MAC = '00:0c:e2:31:12:00'
-    VOLTHA_IGMP_ITERATIONS = 100
-    VOLTHA_TEARDOWN = True
-    voltha = None
-    voltha_attrs = None
-    success = True
-    olt_device_id = None
-    apps = ('org.opencord.aaa', 'org.onosproject.dhcp',)
-    #apps = ('org.opencord.aaa', 'org.onosproject.dhcp', 'org.onosproject.dhcprelay')
-    app_dhcp = ('org.onosproject.dhcp',)
-    app_dhcprelay = ('org.onosproject.dhcprelay',)
-    olt_apps = () #'org.opencord.cordmcast')
-    vtn_app = 'org.opencord.vtn'
-    table_app = 'org.ciena.cordigmp'
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    dhcp_data_dir = os.path.join(test_path, '..', 'setup')
-    table_app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-multitable-2.0-SNAPSHOT.oar')
-    app_file = os.path.join(test_path, '..', 'apps/ciena-cordigmp-2.0-SNAPSHOT.oar')
-    olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-1.2-SNAPSHOT.oar')
-    olt_app_name = 'org.onosproject.olt'
-    #onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
-    VOLTHA_AUTO_CONFIGURE = False
-    num_joins = 0
-
-    relay_interfaces_last = ()
-    interface_to_mac_map = {}
-    host_ip_map = {}
-    default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
-    default_options = [ ('subnet-mask', '255.255.255.0'),
-                     ('broadcast-address', '192.168.1.255'),
-                     ('domain-name-servers', '192.168.1.1'),
-                     ('domain-name', '"mydomain.cord-tester"'),
-                   ]
-    ##specify the IP for the dhcp interface matching the subnet and subnet config
-    ##this is done for each interface dhcpd server would be listening on
-    default_subnet_config = [ ('192.168.1.2',
-'''
-subnet 192.168.1.0 netmask 255.255.255.0 {
-    range 192.168.1.10 192.168.1.100;
-}
-'''), ]
-
-    configs = {}
-    VOLTHA_ENABLED  = True
-    INTF_TX_DEFAULT = 'veth2'
-    INTF_RX_DEFAULT = 'veth0'
-    INTF_2_RX_DEFAULT = 'veth6'
-    TESTCASE_TIMEOUT = 300
-    VOLTHA_IGMP_ITERATIONS = 10
-#    VOLTHA_CONFIG_FAKE = True
-    VOLTHA_CONFIG_FAKE = False
-    VOLTHA_UPLINK_VLAN_MAP = { 'of:0000000000000001' : '222' }
-    VOLTHA_UPLINK_VLAN_START = 444
-    VOLTHA_ONU_UNI_PORT = 'veth0'
-
-    dhcp_server_config = {
-       "ip": "10.1.11.50",
-       "mac": "ca:fe:ca:fe:ca:fe",
-       "subnet": "255.255.252.0",
-       "broadcast": "10.1.11.255",
-       "router": "10.1.8.1",
-       "domain": "8.8.8.8",
-       "ttl": "63",
-       "delay": "2",
-       "startip": "10.1.11.51",
-       "endip": "10.1.11.100"
-      }
-
-
-    CLIENT_CERT = """-----BEGIN CERTIFICATE-----
-MIICuDCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwNjA2MjExMjI3WhcN
-MTcwNjAxMjExMjI3WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
-gYEAwvXiSzb9LZ6c7uNziUfKvoHO7wu/uiFC5YUpXbmVGuGZizbVrny0xnR85Dfe
-+9R4diansfDhIhzOUl1XjN3YDeSS9OeF5YWNNE8XDhlz2d3rVzaN6hIhdotBkUjg
-rUewjTg5OFR31QEyG3v8xR3CLgiE9xQELjZbSA07pD79zuUCAwEAAaNPME0wEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5l
-eGFtcGxlLmNvbS9leGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOBgQDAjkrY
-6tDChmKbvr8w6Du/t8vHjTCoCIocHTN0qzWOeb1YsAGX89+TrWIuO1dFyYd+Z0KC
-PDKB5j/ygml9Na+AklSYAVJIjvlzXKZrOaPmhZqDufi+rXWti/utVqY4VMW2+HKC
-nXp37qWeuFLGyR1519Y1d6F/5XzqmvbwURuEug==
------END CERTIFICATE-----"""
-
-    CLIENT_CERT_INVALID = '''-----BEGIN CERTIFICATE-----
-MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
-CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
-IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
-RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
-MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
-BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
-hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
-5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
-tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
-OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
-qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
-2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
-eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
-MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
-VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
-RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
-dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
-T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
-yg==
------END CERTIFICATE-----'''
-
-    @classmethod
-    def update_apps_version(cls):
-        version = Onos.getVersion()
-        major = int(version.split('.')[0])
-        minor = int(version.split('.')[1])
-        cordigmp_app_version = '2.0-SNAPSHOT'
-        olt_app_version = '1.2-SNAPSHOT'
-        if major > 1:
-            cordigmp_app_version = '3.0-SNAPSHOT'
-            olt_app_version = '2.0-SNAPSHOT'
-        elif major == 1:
-            if minor > 10:
-                cordigmp_app_version = '3.0-SNAPSHOT'
-                olt_app_version = '2.0-SNAPSHOT'
-            elif minor <= 8:
-                olt_app_version = '1.1-SNAPSHOT'
-            cls.app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-{}.oar'.format(cordigmp_app_version))
-            cls.table_app_file = os.path.join(cls.test_path, '..', 'apps/ciena-cordigmp-multitable-{}.oar'.format(cordigmp_app_version))
-            cls.olt_app_file = os.path.join(cls.test_path, '..', 'apps/olt-app-{}.oar'.format(olt_app_version))
-
-    @classmethod
-    def voltha_dhcprelay_setUpClass(cls):
-        ''' Activate the dhcprelay app'''
-        OnosCtrl(cls.app_dhcp).deactivate()
-        time.sleep(3)
-        cls.onos_ctrl = OnosCtrl('org.onosproject.dhcprelay')
-        status, _ = cls.onos_ctrl.activate()
-        assert_equal(status, True)
-        time.sleep(3)
-        cls.dhcp_relay_setup()
-        ##start dhcpd initially with default config
-        cls.dhcpd_start()
-
-    @classmethod
-    def voltha_dhcprelay_tearDownClass(cls):
-        '''Deactivate the dhcp relay app'''
-        try:
-            os.unlink('{}/dhcpd.conf'.format(cls.dhcp_data_dir))
-            os.unlink('{}/dhcpd.leases'.format(cls.dhcp_data_dir))
-        except: pass
-        onos_ctrl = OnosCtrl(cls.app_dhcprelay)
-        onos_ctrl.deactivate()
-        cls.dhcpd_stop()
-        cls.dhcp_relay_cleanup()
-
-    @classmethod
-    def onos_load_config(cls, app, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON config request for app %s returned status %d' %(app, code))
-            assert_equal(status, True)
-        time.sleep(2)
-
-    @classmethod
-    def onos_aaa_load(cls):
-        OnosCtrl.aaa_load_config()
-
-    @classmethod
-    def onos_dhcp_table_load(self, config = None):
-        dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
-        dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
-        if config:
-           for k in config.keys():
-               if dhcp_config.has_key(k):
-                  dhcp_config[k] = config[k]
-        self.onos_load_config('org.onosproject.dhcp', dhcp_dict)
-
-    def dhcp_sndrcv(self, dhcp, update_seed = False, mac = None, validation = None):
-        if validation:
-           cip, sip = dhcp.discover(mac = mac, update_seed = update_seed)
-           assert_not_equal(cip, None)
-           assert_not_equal(sip, None)
-           log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                   (cip, sip, dhcp.get_mac(cip)[0]))
-        if validation == False:
-           cip, sip = dhcp.discover(mac = mac, update_seed = update_seed)
-           assert_equal(cip, None)
-           assert_equal(sip, None)
-           log_test.info('Dhcp client did not get IP from server')
-
-        if validation == 'skip':
-           cip, sip = dhcp.discover(mac = mac, update_seed = update_seed)
-
-        return cip,sip
-
-    def dhcp_request(self, onu_iface = None, seed_ip = '10.10.10.1', update_seed = False, validation = None, startip = '10.10.10.20', mac = None):
-        config = {'startip':startip, 'endip':'10.10.10.200',
-                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-        self.onos_dhcp_table_load(config)
-        dhcp = DHCPTest(seed_ip = seed_ip, iface =onu_iface)
-        cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed, validation = validation, mac = mac)
-        return cip, sip
-
-    @classmethod
-    def setUpClass(cls):
-        cls.update_apps_version()
-        cls.voltha_attrs = dict(host = cls.VOLTHA_HOST,
-                                rest_port = cls.VOLTHA_REST_PORT,
-                                uplink_vlan_map = cls.VOLTHA_UPLINK_VLAN_MAP,
-                                uplink_vlan_start = cls.VOLTHA_UPLINK_VLAN_START)
-        cls.voltha = VolthaCtrl(**cls.voltha_attrs)
-        cls.install_app_table()
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, cls.port_list = cls.olt.olt_port_map()
-        cls.switches = cls.port_map['switches']
-        cls.ponsim_ports = cls.port_map['ponsim']
-        cls.num_ports = cls.port_map['num_ports']
-        if cls.num_ports > 1:
-              cls.num_ports -= 1 ##account for the tx port
-        cls.activate_apps(cls.apps + cls.olt_apps, deactivate = True)
-        cls.deactivate_apps(cls.app_dhcprelay)
-        cls.onos_aaa_load()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the olt apps and restart OVS back'''
-        apps = cls.olt_apps + ( cls.table_app,)
-        for app in apps:
-            onos_ctrl = OnosCtrl(app)
-            onos_ctrl.deactivate()
-        cls.deactivate_apps(cls.app_dhcprelay)
-        cls.install_app_igmp()
-        log_test.info('TearDownClass Restarting the Radius Server in the TA setup')
-        cord_test_radius_restart()
-
-    @classmethod
-    def install_app_igmp(cls):
-        ##Uninstall the table app on class exit
-        OnosCtrl.uninstall_app(cls.table_app)
-        time.sleep(2)
-        log_test.info('Installing back the cord igmp app %s for subscriber test on exit' %(cls.app_file))
-        OnosCtrl.install_app(cls.app_file)
-
-    def remove_olt(self, switch_map):
-        controller = get_controller()
-        auth = ('karaf', 'karaf')
-        #remove subscriber for every port on all the voltha devices
-        for device, device_map in switch_map.iteritems():
-            uni_ports = device_map['ports']
-            uplink_vlan = device_map['uplink_vlan']
-            for port in uni_ports:
-                rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}'.format(controller,
-                                                                         device,
-                                                                         port)
-                resp = requests.delete(rest_url, auth = auth)
-                if resp.status_code not in [204, 202, 200]:
-                      log_test.error('Error deleting subscriber for device %s on port %s' %(device, port))
-                else:
-                      log_test.info('Deleted subscriber for device %s on port  %s' %(device, port))
-        OnosCtrl.uninstall_app(self.olt_app_file)
-
-    def config_olt(self, switch_map):
-        controller = get_controller()
-        auth = ('karaf', 'karaf')
-        #configure subscriber for every port on all the voltha devices
-        for device, device_map in switch_map.iteritems():
-            uni_ports = device_map['ports']
-            uplink_vlan = device_map['uplink_vlan']
-            for port in uni_ports:
-                vlan = port
-                rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}/{}'.format(controller,
-                                                                            device,
-                                                                            port,
-                                                                            vlan)
-                resp = requests.post(rest_url, auth = auth)
-                #assert_equal(resp.ok, True)
-
-    def voltha_uni_port_toggle(self, uni_port = None):
-        ## Admin state of port is down and up
-        if not uni_port:
-           uni_port = self.INTF_RX_DEFAULT
-        cmd = 'ifconfig {} down'.format(uni_port)
-        os.system(cmd)
-        log_test.info('Admin state of uni_port is down')
-        time.sleep(30)
-        cmd = 'ifconfig {} up'.format(uni_port)
-        os.system(cmd)
-        log_test.info('Admin state of uni_port is up now')
-        time.sleep(30)
-        return
-
-    @classmethod
-    def install_app_table(cls):
-        ##Uninstall the existing app if any
-        OnosCtrl.uninstall_app(cls.table_app)
-        time.sleep(2)
-        log_test.info('Installing the multi table app %s for subscriber test' %(cls.table_app_file))
-        OnosCtrl.install_app(cls.table_app_file)
-        time.sleep(3)
-
-    @classmethod
-    def activate_apps(cls, apps, deactivate = False):
-        for app in apps:
-            onos_ctrl = OnosCtrl(app)
-            if deactivate is True:
-               onos_ctrl.deactivate()
-               time.sleep(2)
-               log_test.info('Activating app %s' %app)
-               status, _ = onos_ctrl.activate()
-               assert_equal(status, True)
-               time.sleep(2)
-
-    @classmethod
-    def deactivate_apps(cls, apps):
-        cls.success = True
-        for app in apps:
-            onos_ctrl = OnosCtrl(app)
-            log_test.info('Deactivating app %s' %app)
-            status, _ = onos_ctrl.deactivate()
-            if status is False:
-               cls.success = False
-    #        assert_equal(status, True)
-            time.sleep(2)
-
-    def random_ip(self,start_ip = '10.10.10.20', end_ip = '10.10.10.65'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    def random_mcast_ip(self,start_ip = '224.0.1.0', end_ip = '224.0.1.100'):
-        start = list(map(int, start_ip.split(".")))
-        end = list(map(int, end_ip.split(".")))
-        temp = start
-        ip_range = []
-        ip_range.append(start_ip)
-        while temp != end:
-            start[3] += 1
-            for i in (3, 2, 1):
-                if temp[i] == 255:
-                    temp[i] = 0
-                    temp[i-1] += 1
-            ip_range.append(".".join(map(str, temp)))
-        return random.choice(ip_range)
-
-    @classmethod
-    def dhcp_relay_setup(cls):
-        did = OnosCtrl.get_device_id()
-        cls.relay_device_id = did
-        cls.relay_device_id = 'of:0000000000000001'
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if cls.port_map:
-            ##Per subscriber, we use 1 relay port
-            try:
-                relay_port = cls.port_map[cls.port_map['relay_ports'][0]]
-            except:
-                relay_port = cls.port_map['uplink']
-            cls.relay_interface_port = relay_port
-            cls.relay_interfaces = (cls.port_map[cls.relay_interface_port],)
-        else:
-            cls.relay_interface_port = 100
-            cls.relay_interfaces = (g_subscriber_port_map[cls.relay_interface_port],)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        if cls.port_map:
-            ##generate a ip/mac client virtual interface config for onos
-            interface_list = []
-            for port in cls.port_map['ports']:
-                port_num = cls.port_map[port]
-                if port_num == cls.port_map['uplink']:
-                    continue
-                ip = cls.get_host_ip(port_num)
-                mac = cls.get_mac(port)
-                interface_list.append((port_num, ip, mac))
-
-            #configure dhcp server virtual interface on the same subnet as first client interface
-            relay_ip = cls.get_host_ip(interface_list[0][0])
-            relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
-            interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
-            cls.onos_interface_load(interface_list)
-
-    @classmethod
-    def onos_interface_load(cls, interface_list):
-        interface_dict = { 'ports': {} }
-        for port_num, ip, mac in interface_list:
-            port_map = interface_dict['ports']
-            port = '{}/{}'.format(cls.relay_device_id, port_num)
-            port_map[port] = { 'interfaces': [] }
-            interface_list = port_map[port]['interfaces']
-            interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
-                              'mac' : mac,
-                              'name': 'vir-{}'.format(port_num)
-                            }
-            interface_list.append(interface_map)
-
-        cls.onos_load_config('org.onosproject.dhcprelay', interface_dict)
-        cls.configs['interface_config'] = interface_dict
-
-    @classmethod
-    def get_host_ip(cls, port):
-        if cls.host_ip_map.has_key(port):
-            return cls.host_ip_map[port]
-        cls.host_ip_map[port] = '192.168.1.{}'.format(port)
-        return cls.host_ip_map[port]
-
-    @classmethod
-    def host_load(cls, iface):
-        '''Have ONOS discover the hosts for dhcp-relay responses'''
-        port = g_subscriber_port_map[iface]
-        host = '173.17.1.{}'.format(port)
-        cmds = ( 'ifconfig {} 0'.format(iface),
-                 'ifconfig {0} {1}'.format(iface, host),
-                 'arping -I {0} {1} -c 2'.format(iface, host),
-                 'ifconfig {} 0'.format(iface), )
-        for c in cmds:
-            os.system(c)
-
-    @classmethod
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    @classmethod
-    def dhcpd_start(cls, intf_list = None,
-                    config = default_config, options = default_options,
-                    subnet = default_subnet_config):
-        '''Start the dhcpd server by generating the conf file'''
-        if intf_list is None:
-            intf_list = cls.relay_interfaces
-        ##stop dhcpd if already running
-        cls.dhcpd_stop()
-        dhcp_conf = cls.dhcpd_conf_generate(config = config, options = options,
-                                            subnet = subnet)
-        ##first touch dhcpd.leases if it doesn't exist
-        lease_file = '{}/dhcpd.leases'.format(cls.dhcp_data_dir)
-        if os.access(lease_file, os.F_OK) is False:
-            with open(lease_file, 'w') as fd: pass
-
-        conf_file = '{}/dhcpd.conf'.format(cls.dhcp_data_dir)
-        with open(conf_file, 'w') as fd:
-            fd.write(dhcp_conf)
-
-        #now configure the dhcpd interfaces for various subnets
-        index = 0
-        intf_info = []
-        for ip,_ in subnet:
-            intf = intf_list[index]
-            mac = cls.get_mac(intf)
-            intf_info.append((ip, mac))
-            index += 1
-            os.system('ifconfig {} {}'.format(intf, ip))
-
-        intf_str = ','.join(intf_list)
-        dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format(conf_file, lease_file, intf_str)
-        log_test.info('Starting DHCPD server with command: %s' %dhcpd_cmd)
-        ret = os.system(dhcpd_cmd)
-        assert_equal(ret, 0)
-        time.sleep(3)
-        cls.relay_interfaces_last = cls.relay_interfaces
-        cls.relay_interfaces = intf_list
-        cls.onos_dhcp_relay_load_1(*intf_info[0])
-
-    @classmethod
-    def dhcpd_stop(cls):
-        os.system('pkill -9 dhcpd')
-        for intf in cls.relay_interfaces:
-            os.system('ifconfig {} 0'.format(intf))
-
-        cls.relay_interfaces = cls.relay_interfaces_last
-
-    @classmethod
-    def get_mac(cls, iface):
-        if cls.interface_to_mac_map.has_key(iface):
-            return cls.interface_to_mac_map[iface]
-        mac = get_mac(iface, pad = 0)
-        cls.interface_to_mac_map[iface] = mac
-        return mac
-
-    def send_recv(self, mac=None, update_seed = False, validate = True, dhcp_obj = None):
-        if dhcp_obj is None:
-           dhcp_obj =  self.dhcp
-        cip, sip = dhcp_obj.discover(mac = mac, update_seed = update_seed)
-        if validate:
-            assert_not_equal(cip, None)
-            assert_not_equal(sip, None)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s' %
-                (cip, sip, dhcp_obj.get_mac(cip)[0]))
-        return cip,sip
-
-    def send_recv_function_in_thread(self, mac=None, update_seed = False, validate = False, dhcp_obj = None):
-        self.success = True
-        cip, sip = self.send_recv(mac=mac,update_seed=update_seed,validate=validate,dhcp_obj = dhcp_obj)
-        if cip is None or sip is None:
-           self.success = False
-
-    @classmethod
-    def dhcpd_conf_generate(cls, config = default_config, options = default_options,
-                            subnet = default_subnet_config):
-        conf = ''
-        for k, v in config.items():
-            conf += '{} {};\n'.format(k, v)
-
-        opts = ''
-        for k, v in options:
-            opts += 'option {} {};\n'.format(k, v)
-
-        subnet_config = ''
-        for _, v in subnet:
-            subnet_config += '{}\n'.format(v)
-
-        return '{}{}{}'.format(conf, opts, subnet_config)
-
-    @classmethod
-    def onos_dhcp_relay_load_1(cls, server_ip, server_mac):
-        relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
-        dhcp_dict = {'apps':{'org.onosproject.dhcp-relay':{'dhcprelay':
-                                                          {'dhcpserverConnectPoint':relay_device_map,
-                                                           'serverip':server_ip,
-                                                           'servermac':server_mac
-                                                           }
-                                                           }
-                             }
-                     }
-        cls.onos_load_config(cls.app_dhcprelay,dhcp_dict)
-        cls.configs['relay_config'] = dhcp_dict
-
-    @classmethod
-    def dhcp_relay_cleanup(cls):
-        ##reset the ONOS port configuration back to default
-        for config in cls.configs.items():
-            OnosCtrl.delete(config)
-        # if cls.onos_restartable is True:
-        #     log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
-        #     return cord_test_onos_restart(config = {})
-
-
-    def tls_flow_check(self, olt_ports, cert_info = None, multiple_sub = False):
-        if multiple_sub is True:
-           olt_nni_port = olt_ports.tx_port
-           olt_uni_port = olt_ports.rx_port
-        else:
-           olt_uni_port = olt_ports
-
-        def tls_fail_cb():
-             log_test.info('TLS verification failed')
-        if cert_info is None:
-           tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = olt_uni_port)
-           log_test.info('Running subscriber %s tls auth test with valid TLS certificate' %olt_uni_port)
-           tls.runTest()
-           if tls.failTest is True:
-              self.success = False
-           assert_equal(tls.failTest, False)
-        if cert_info == "no_cert":
-           tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = olt_uni_port, client_cert = '')
-           log_test.info('Running subscriber %s tls auth test with no TLS certificate' %olt_uni_port)
-           tls.runTest()
-           if tls.failTest is False:
-              self.success = False
-           assert_equal(tls.failTest, True)
-        if cert_info == "invalid_cert":
-           tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = olt_uni_port, client_cert = self.CLIENT_CERT_INVALID)
-           log_test.info('Running subscriber %s tls auth test with invalid TLS certificate' %olt_uni_port)
-           tls.runTest()
-           if tls.failTest is False:
-              self.success = False
-           assert_equal(tls.failTest, True)
-        if cert_info == "same_cert":
-           tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = olt_uni_port)
-           log_test.info('Running subscriber %s tls auth test with same valid TLS certificate' %olt_uni_port)
-           tls.runTest()
-           if tls.failTest is True:
-              self.success = False
-           assert_equal(tls.failTest, False)
-        if cert_info == "app_deactivate" or cert_info == "restart_radius" or cert_info == "disable_olt_device" or \
-           cert_info == "uni_port_admin_down" or cert_info == "restart_olt_device" or cert_info == "restart_onu_device":
-           tls = TLSAuthTest(fail_cb = tls_fail_cb, intf = olt_uni_port, client_cert = self.CLIENT_CERT_INVALID)
-           log_test.info('Running subscriber %s tls auth test with %s' %(olt_uni_port,cert_info))
-           tls.runTest()
-           if tls.failTest is False:
-              self.success = False
-           assert_equal(tls.failTest, True)
-        self.test_status = True
-        return self.test_status
-
-    def dhcp_flow_check(self, olt_ports, negative_test = None, multiple_sub = False):
-        if multiple_sub is True:
-           olt_nni_port = olt_ports.tx_port
-           onu_iface = olt_ports.rx_port
-           dhcp_server_startip = self.random_ip()
-           random_mac = '00:00:00:0a:0a:' + hex(random.randrange(50,254)).split('x')[1]
-        else:
-          onu_iface = olt_ports
-          dhcp_server_startip = '10.10.10.20'
-          random_mac = None
-        self.success = True
-
-        if negative_test is None:
-           cip, sip = self.dhcp_request(onu_iface, update_seed = True, validation = 'skip', startip = dhcp_server_startip, mac = random_mac)
-           if cip == None or sip == None:
-              self.success = False
-              self.test_status = False
-              assert_not_equal(cip,None)
-              assert_not_equal(sip,None)
-           else:
-              log_test.info('Subscriber %s client ip %s from server %s' %(onu_iface, cip, sip))
-              self.test_status = True
-
-        if negative_test == "interrupting_dhcp_flows":
-           cip, sip = self.dhcp_request(onu_iface, update_seed = True, validation = False)
-           if cip is not None:
-              self.success =  False
-           assert_equal(cip,None)
-           log_test.info('Subscriber %s not got client ip %s from server' %(onu_iface, cip))
-           self.test_status = True
-
-        if negative_test == "invalid_src_mac_broadcast":
-           config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                     'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
-
-           if cip is not None:
-              self.success =  False
-           log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected self.success = %s '%self.success)
-           assert_equal(cip,None)
-           log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-           self.test_status = True
-
-        if negative_test == "invalid_src_mac_multicast":
-           config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                     'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:91:02:e4')
-           if cip is not None:
-              self.success =  False
-           assert_equal(cip,None)
-           log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-           self.test_status = True
-
-        if negative_test == "invalid_src_mac_junk":
-           config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                     'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
-           if cip is not None:
-              self.success =  False
-           assert_equal(cip,None)
-           log_test.info('ONOS dhcp server rejected client discover with invalid source mac as expected')
-           self.test_status = True
-
-        if negative_test == "request_release":
-           config = {'startip':'10.10.100.20', 'endip':'10.10.100.230',
-                     'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = onu_iface)
-           cip, sip = self.dhcp_sndrcv(self.dhcp)
-           log_test.info('Releasing ip %s to server %s' %(cip, sip))
-           if not self.dhcp.release(cip):
-              self.success =  False
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           cip2, sip2 = self.dhcp_sndrcv(self.dhcp, update_seed = True)
-           log_test.info('Verifying released IP was given back on rediscover')
-           if not cip == cip2:
-              self.success =  False
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-           self.test_status = True
-
-        if negative_test == "starvation_positive":
-           config = {'startip':'193.170.1.20', 'endip':'193.170.1.69',
-                     'ip':'193.170.1.2', 'mac': "ca:fe:c2:fe:cc:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'192.168.1.255', 'router': '192.168.1.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '192.169.1.1', iface = onu_iface)
-           ip_map = {}
-           for i in range(10):
-               cip, sip = self.dhcp_sndrcv(self.dhcp, update_seed = True)
-               if ip_map.has_key(cip):
-                  self.success =  False
-                  log_test.info('IP %s given out multiple times' %cip)
-                  assert_equal(False, ip_map.has_key(cip))
-               ip_map[cip] = sip
-           self.test_status = True
-
-        if negative_test == "starvation_negative":
-           config = {'startip':'182.17.0.20', 'endip':'182.17.0.69',
-                     'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = onu_iface)
-           log_test.info('Verifying passitive case')
-           for x in xrange(50):
-               mac = RandMAC()._fix()
-               self.dhcp_sndrcv(self.dhcp,mac = mac)
-           log_test.info('Verifying negative case')
-           cip, sip = self.dhcp_sndrcv(self.dhcp,update_seed = True)
-           if cip or sip is not None:
-              self.success = False
-           assert_equal(cip, None)
-           assert_equal(sip, None)
-           self.test_status = True
-           self.success =  True
-
-        if negative_test == "multiple_discover":
-           config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                     'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover()
-           log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
-                       (cip, sip, mac) )
-           if cip is None:
-              self.success = False
-           assert_not_equal(cip, None)
-           log_test.info('Triggering DHCP discover again.')
-           new_cip, new_sip, new_mac, _ = self.dhcp.only_discover()
-           if not new_cip == cip:
-              self.success = False
-           assert_equal(new_cip, cip)
-           log_test.info('client got same IP as expected when sent 2nd discovery')
-           self.test_status = True
- #          self.success =  True
-        if negative_test == "multiple_requests":
-           config = {'startip':'10.10.10.20', 'endip':'10.10.10.69',
-                     'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = onu_iface)
-           log_test.info('Sending DHCP discover and DHCP request.')
-           cip, sip = self.dhcp_sndrcv(self.dhcp,update_seed = True)
-           mac = self.dhcp.get_mac(cip)[0]
-           log_test.info("Sending DHCP request again.")
-           new_cip, new_sip = self.dhcp.only_request(cip, mac)
-           assert_equal(new_cip,cip)
-           log_test.info('server offered same IP to clain for multiple requests, as expected')
-           self.test_status = True
-#           self.success =  True
-        if negative_test == "desired_ip_address":
-           config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                     'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '20.20.20.50', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-           if cip or sip is None:
-              self.success = False
-           assert_not_equal(cip, None)
-           log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                      (cip, sip, mac))
-           if not self.dhcp.seed_ip == cip:
-              self.success = False
-           assert_equal(cip,self.dhcp.seed_ip)
-           log_test.info('ONOS dhcp server offered client requested IP %s as expected'%self.dhcp.seed_ip)
-           self.test_status = True
-  #         self.success =  True
-        if negative_test == "desired_out_of_pool_ip_address":
-           config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                     'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '20.20.20.75', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover(desired = True)
-           if cip or sip is None:
-              self.success = False
-           assert_not_equal(cip, None)
-           log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                      (cip, sip, mac) )
-           if self.dhcp.seed_ip == cip:
-              self.success = False
-           assert_not_equal(cip,self.dhcp.seed_ip)
-           log_test.info('server offered IP from its pool of IPs when requested out of pool IP, as expected')
-           self.test_status = True
-   #        self.success =  True
-        if negative_test == "dhcp_renew":
-           config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                     'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover()
-           log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                                            (cip, sip, mac) )
-           if cip or sip is None:
-              self.success = False
-           assert_not_equal(cip, None)
-           new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-           log_test.info('waiting renew  time %d seconds to send next request packet'%lval)
-           time.sleep(lval)
-           latest_cip, latest_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-           if not latest_cip == cip:
-              self.success = False
-           assert_equal(latest_cip,cip)
-           log_test.info('client got same IP after renew time, as expected')
-           self.test_status = True
-    #       self.success =  True
-        if negative_test == "dhcp_rebind":
-           config = {'startip':'20.20.20.30', 'endip':'20.20.20.69',
-                     'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
-                     'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
-           self.onos_dhcp_table_load(config)
-           self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = onu_iface)
-           cip, sip, mac, _ = self.dhcp.only_discover()
-           log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                                       (cip, sip, mac) )
-           if cip or sip is None:
-              self.success = False
-           assert_not_equal(cip, None)
-           new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-           log_test.info('waiting rebind time %d seconds to send next request packet'%lval)
-           time.sleep(lval)
-           latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-           if not latest_cip == cip:
-              self.success = False
-           assert_equal(latest_cip,cip)
-           log_test.info('client got same IP after rebind time, as expected')
-           self.test_status = True
-     #      self.success =  True
-        return self.test_status
-
-    def dhcprelay_flow_check(self, seed_ip = None, iface = None, mac= None, test_scenario = None):
-        self.success = True
-        if test_scenario is None:
-           self.dhcp = DHCPTest(seed_ip = seed_ip, iface = iface)
-           self.send_recv(mac=mac)
-        elif test_scenario == 'multiple_discover':
-             self.dhcp_2 = DHCPTest(seed_ip = seed_ip, iface = iface)
-             cip, sip, mac, _ = self.dhcp_2.only_discover(mac=mac)
-             log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCP REQUEST.' %
-                                  (cip, sip, mac) )
-             if cip is None:
-                self.success = False
-                assert_not_equal(cip, None)
-             log_test.info('Triggering DHCP discover again.')
-             new_cip, new_sip, new_mac, _ = self.dhcp_2.only_discover(mac=mac)
-             if new_cip != cip:
-                self.success = False
-                assert_equal(new_cip, cip)
-             log_test.info('Got same ip to same the client when sent discover again, as expected')
-
-        elif test_scenario == 'desired_ip':
-             self.dhcp_3 = DHCPTest(seed_ip = '192.168.1.31', iface = iface)
-             cip, sip, mac, _ = self.dhcp_3.only_discover(mac=mac,desired = True)
-             if cip != self.dhcp_3.seed_ip:
-                self.success = False
-                assert_equal(cip,self.dhcp_3.seed_ip)
-             log_test.info('Got dhcp client desired IP %s from server %s for mac %s as expected' %
-                  (cip, sip, mac) )
-
-        elif test_scenario == 'out_of_pool_ip':
-             self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-             cip, sip, mac, _ = self.dhcp.only_discover(mac= mac,desired = True)
-             if cip is None or cip == self.dhcp.seed_ip:
-                self.success = False
-                assert_not_equal(cip,None)
-                assert_not_equal(cip,self.dhcp.seed_ip)
-             log_test.info('server offered IP from its pool when requested out of pool IP, as expected')
-        elif test_scenario == 'multiple_discover_1':
-             pass
-        return self.success
-
-    def recv_channel_cb(self, pkt):
-        ##First verify that we have received the packet for the joined instance
-        chan = self.subscriber.caddr(pkt[IP].dst)
-        assert_equal(chan in self.subscriber.join_map.keys(), True)
-        recv_time = monotonic.monotonic() * 1000000
-        join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
-        delta = recv_time - join_time
-        self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
-        self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
-        log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
-        self.test_status = True
-
-    def traffic_verify(self, subscriber):
-   # if subscriber.has_service('TRAFFIC'):
-        url = 'http://www.google.com'
-        resp = requests.get(url)
-        self.test_status = resp.ok
-        if resp.ok == False:
-              log_test.info('Subscriber %s failed get from url %s with status code %d'
-                         %(subscriber.name, url, resp.status_code))
-        else:
-              log_test.info('GET request from %s succeeded for subscriber %s'
-                         %(url, subscriber.name))
-        return self.test_status
-
-    def igmp_flow_check(self, subscriber, multiple_sub = False):
-        chan = 0
-        for i in range(self.VOLTHA_IGMP_ITERATIONS + subscriber.num_channels):
-            if subscriber.num_channels == 1:
-               if i != 0:
-                  subscriber.channel_leave(chan, src_list = subscriber.src_list)
-               chan = subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-            else:
-               chan = subscriber.channel_join_next(delay = 2, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                 time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-    #        for i in range(1):
-            time.sleep(0.5)
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-            #log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-            #subscriber.channel_leave(chan, src_list = subscriber.src_list)
-            time.sleep(5)
-#           log_test.info('Interface %s Join RX stats for subscriber, %s' %(subscriber.iface,subscriber.join_rx_stats))
-            if subscriber.num_channels == 1:
-               pass
-            elif chan != 0:
-            #Should not receive packets for this channel
-               self.recv_timeout = True
-               subscriber.recv_timeout = True
-               subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-               subscriber.recv_timeout = False
-               self.recv_timeout = False
-            log_test.info('Joining channel %d for subscriber port %s' %(chan, subscriber.rx_port))
-#           subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-#            chan = subscriber.num_channels - i
-#                  self.test_status = True
-        return self.test_status
-
-    def igmp_join_next_channel_flow_check(self, subscriber, multiple_sub = False):
-        chan = 0
-        for i in range(self.VOLTHA_IGMP_ITERATIONS + subscriber.num_channels):
-#            if subscriber.num_channels == 1:
-#               chan = subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-#            else:
-            chan = subscriber.channel_join_next(delay = 2, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                 time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-    #        for i in range(1):
-            time.sleep(0.5)
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-            #log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-            #subscriber.channel_leave(chan, src_list = subscriber.src_list)
-            time.sleep(5)
-#           log_test.info('Interface %s Join RX stats for subscriber, %s' %(subscriber.iface,subscriber.join_rx_stats))
-#            if subscriber.num_channels == 1:
-#               pass
-#            elif chan != 0:
-#               pass
-            #Should not receive packets for this channel
-#               log_test.info
-#               self.recv_timeout = True
-#               subscriber.recv_timeout = True
-#               subscriber.channel_receive(chan-1, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-#               subscriber.recv_timeout = False
-#               self.recv_timeout = False
-#           log_test.info('Joining channel %d for subscriber port %s' %(chan, subscriber.rx_port))
-#           subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-            chan = subscriber.num_channels - i
-#                  self.test_status = True
-        return self.test_status
-
-
-    def igmp_leave_flow_check(self, subscriber, multiple_sub = False):
-        chan = 0
-        for i in range(self.VOLTHA_IGMP_ITERATIONS):
-            subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                 time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-#            for i in range(1):
-            time.sleep(0.5)
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-            log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-            time.sleep(10)
-#           log_test.info('Interface %s Join RX stats for subscriber, %s' %(subscriber.iface,subscriber.join_rx_stats))
-        #Should not receive packets for this subscriber
-            self.recv_timeout = True
-            subscriber.recv_timeout = True
-            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list)
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-#           log_test.info('Joining channel %d for subscriber port %s' %(chan, subscriber.rx_port))
-#           subscriber.channel_join(chan, delay = 2, src_list = subscriber.src_list)
-#                  self.test_status = True
-        return self.test_status
-
-
-    def igmp_flow_check_join_change_to_exclude(self, subscriber, multiple_sub = False):
-        chan = 0
-        #for i in range(self.VOLTHA_IGMP_ITERATIONS):
-        for i in range(3):
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                  time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[1])
-            time.sleep(5)
-            log_test.info('Leaving channel %d for subscriber on port %s from specific source address %s and waited till GMI timer expires' %(chan, subscriber.rx_port, subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list[0], record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE)
-            #### Adding delay till igmp timer expire data traffic is received from source specific of  subscriber.src_list[0]
-            time.sleep(60)
-            self.recv_timeout = False
-            subscriber.recv_timeout = False
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[1])
-            if self.test_status is True:
-               self.test_status = subscriber.channel_not_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[0])
-            if self.test_status is False:
-               subscriber.channel_leave(chan, src_list = subscriber.src_list)
-               continue
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-#                self.test_status = True
-        return self.test_status
-
-    def igmp_flow_check_join_change_to_exclude_again_include_back(self, subscriber, multiple_sub = False):
-        chan = 0
-        #for i in range(self.VOLTHA_IGMP_ITERATIONS):
-        for i in range(3):
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                  time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[1])
-            time.sleep(5)
-            log_test.info('Leaving channel %d for subscriber on port %s from specific source address %s and waited till GMI timer expires' %(chan, subscriber.rx_port, subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list[0], record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE)
-            #### Adding delay till igmp timer expire data traffic is received from source specific of  subscriber.src_list[0]
-            time.sleep(60)
-            self.recv_timeout = False
-            subscriber.recv_timeout = False
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[1])
-            if self.test_status is True:
-               self.test_status = subscriber.channel_not_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[0])
-            if self.test_status is False:
-               subscriber.channel_leave(chan, src_list = subscriber.src_list)
-               continue
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            log_test.info('Again include the channel %s on port %s with souce list ip %s' %(chan, subscriber.rx_port,subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list, record_type = IGMP_V3_GR_TYPE_CHANGE_TO_INCLUDE)
-            time.sleep(5)
-#            self.recv_timeout = True
-#            subscriber.recv_timeout = True
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[0])
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-
-
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-#                self.test_status = True
-        return self.test_status
-
-    def igmp_flow_check_join_change_to_block(self, subscriber, multiple_sub = False):
-        chan = 0
-        #for i in range(self.VOLTHA_IGMP_ITERATIONS):
-        for i in range(3):
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                  time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[1])
-            time.sleep(5)
-            log_test.info('Leaving channel %d for subscriber on port %s from specific source address %s and waited till GMI timer expires' %(chan, subscriber.rx_port, subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list[0], record_type = IGMP_V3_GR_TYPE_BLOCK_OLD)
-            #### Adding delay till igmp timer expire data traffic is received from source specific of  subscriber.src_list[0]
-            time.sleep(60)
-            self.recv_timeout = False
-            subscriber.recv_timeout = False
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[1])
-            if self.test_status is True:
-               self.test_status = subscriber.channel_not_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[0])
-            if self.test_status is False:
-               subscriber.channel_leave(chan, src_list = subscriber.src_list)
-               continue
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-#                self.test_status = True
-        return self.test_status
-
-
-    def igmp_flow_check_join_change_to_block_again_allow_back(self, subscriber, multiple_sub = False):
-        chan = 0
-        #for i in range(self.VOLTHA_IGMP_ITERATIONS):
-        for i in range(3):
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-            self.num_joins += 1
-            while self.num_joins < self.num_subscribers:
-                  time.sleep(5)
-            log_test.info('All subscribers have joined the channel')
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[1])
-            time.sleep(5)
-            log_test.info('Leaving channel %d for subscriber on port %s from specific source address %s and waited till GMI timer expires' %(chan, subscriber.rx_port, subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list[0], record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE)
-            #### Adding delay till igmp timer expire data traffic is received from source specific of  subscriber.src_list[0]
-            time.sleep(60)
-            self.recv_timeout = False
-            subscriber.recv_timeout = False
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[1])
-            if self.test_status is True:
-               self.test_status = subscriber.channel_not_receive(chan, cb = subscriber.recv_channel_cb, count = 1, src_list = subscriber.src_list[0])
-            if self.test_status is False:
-               subscriber.channel_leave(chan, src_list = subscriber.src_list)
-               continue
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            log_test.info('Again include the channel %s on port %s with souce list ip %s' %(chan, subscriber.rx_port,subscriber.src_list[0]))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list, record_type = IGMP_V3_GR_TYPE_ALLOW_NEW)
-            time.sleep(5)
-#            self.recv_timeout = True
-#            subscriber.recv_timeout = True
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 5, src_list = subscriber.src_list[0])
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-
-
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-#                self.test_status = True
-        return self.test_status
-
-    def igmp_flow_check_group_include_source_empty_list(self, subscriber, multiple_sub = False):
-        chan = 0
-        subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-        self.num_joins += 1
-        while self.num_joins < self.num_subscribers:
-              time.sleep(5)
-        log_test.info('All subscribers have joined the channel')
-        self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-        if self.test_status is True:
-           log_test.info('Subscriber should not receive data from channel %s on any specific source %s, test is failed' %(chan, subscriber.rx_port))
-           self.test_status = False
-        else:
-           log_test.info('Subscriber not receive data from channel %s on any specific source %s' %(chan, subscriber.rx_port))
-           self.test_status = True
-        log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-        subscriber.channel_leave(chan, src_list = subscriber.src_list)
-        time.sleep(5)
-        subscriber.recv_timeout = False
-        self.recv_timeout = False
-        return self.test_status
-
-    def igmp_flow_check_group_exclude_source_empty_list(self, subscriber, multiple_sub = False):
-        chan = 0
-        subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-        self.num_joins += 1
-        while self.num_joins < self.num_subscribers:
-              time.sleep(5)
-        log_test.info('All subscribers have joined the channel')
-        self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-        if self.test_status is True:
-           log_test.info('Subscriber should not receive data from channel %s on any specific source %s, test is failed' %(chan, subscriber.rx_port))
-           self.test_status = False
-        else:
-           log_test.info('Subscriber not receive data from channel %s on any specific source %s' %(chan, subscriber.rx_port))
-           self.test_status = True
-
-        subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list, record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE)
-        log_test.info('Send join to multicast group with exclude empty source list and waited till GMI timer expires')
-        time.sleep(60)
-
-        self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
-        log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-        subscriber.channel_leave(chan, src_list = subscriber.src_list)
-        time.sleep(5)
-        subscriber.recv_timeout = False
-        self.recv_timeout = False
-        return self.test_status
-
-    def igmp_flow_check_group_exclude_source_empty_list_1(self, subscriber, multiple_sub = False):
-        chan = 0
-        subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE)
-        self.num_joins += 1
-        while self.num_joins < self.num_subscribers:
-              time.sleep(5)
-        log_test.info('All subscribers have joined the channel')
-        for i in range(10):
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10, src_list = subscriber.src_list)
-            log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-            time.sleep(5)
-            log_test.info('Interface %s Join RX stats for subscriber, %s' %(subscriber.iface,subscriber.join_rx_stats))
-        #Should not receive packets for this subscriber
-            self.recv_timeout = True
-            subscriber.recv_timeout = True
-            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10, src_list = subscriber.src_list)
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            log_test.info('Joining channel %d for subscriber port %s' %(chan, subscriber.rx_port))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-#                  self.test_status = True
-        return self.test_status
-
-    def igmp_flow_check_during_olt_onu_operational_issues(self, subscriber, multiple_sub = False):
-        chan = 0
-        subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-        self.num_joins += 1
-        while self.num_joins < self.num_subscribers:
-              time.sleep(5)
-        log_test.info('All subscribers have joined the channel')
-        for i in range(2):
-            self.test_status = subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10, src_list = subscriber.src_list)
-            log_test.info('Leaving channel %d for subscriber on port %s' %(chan, subscriber.rx_port))
-            subscriber.channel_leave(chan, src_list = subscriber.src_list)
-            time.sleep(5)
-            log_test.info('Interface %s Join RX stats for subscriber, %s' %(subscriber.iface,subscriber.join_rx_stats))
-        #Should not receive packets for this subscriber
-            self.recv_timeout = True
-            subscriber.recv_timeout = True
-            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10, src_list = subscriber.src_list)
-            subscriber.recv_timeout = False
-            self.recv_timeout = False
-            log_test.info('Joining channel %d for subscriber port %s' %(chan, subscriber.rx_port))
-            subscriber.channel_join(chan, delay = 0, src_list = subscriber.src_list)
-#                  self.test_status = True
-        return self.test_status
-
-    def voltha_igmp_jump_verify(self, subscriber):
-	    if subscriber.has_service('IGMP'):
-		  for i in xrange(subscriber.num):
-			log_test.info('Subscriber %s jumping channel' %subscriber.name)
-			chan = subscriber.channel_jump(delay=0)
-			subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
-			log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-			time.sleep(3)
-		  log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
-		  self.test_status = True
-		  return self.test_status
-
-    def voltha_igmp_next_verify(self, subscriber):
-	  for c in xrange(self.VOLTHA_IGMP_ITERATIONS):
-		for i in xrange(subscriber.num):
-		      if i:
-			    chan = subscriber.channel_join_next(delay=0, leave_flag = self.leave_flag)
-			    time.sleep(0.2)
-		      else:
-			    chan = subscriber.channel_join(i, delay=0)
-			    time.sleep(0.2)
-			    if subscriber.num == 1:
-				  subscriber.channel_leave(chan)
-		      log_test.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
-		      #subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
-		      #log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
-	  self.test_status = True
-	  return self.test_status
-
-    def voltha_subscribers(self, services, cbs = None, num_subscribers = 1, num_channels = 1, src_list = None):
-          """Test subscriber join next for channel surfing"""
-          voltha = VolthaCtrl(self.VOLTHA_HOST,
-                              rest_port = self.VOLTHA_REST_PORT,
-                              uplink_vlan_map = self.VOLTHA_UPLINK_VLAN_MAP)
-          if self.VOLTHA_OLT_TYPE.startswith('ponsim'):
-             ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-             log_test.info('Enabling ponsim olt')
-             device_id, status = voltha.enable_device(self.VOLTHA_OLT_TYPE, address = ponsim_address)
-             if device_id != '':
-                self.olt_device_id = device_id
-          else:
-             log_test.info('This setup test cases is developed on ponsim olt only, hence stop execution')
-             assert_equal(False, True)
-
-          assert_not_equal(device_id, None)
-          if status == False:
-                voltha.disable_device(device_id, delete = True)
-          assert_equal(status, True)
-          time.sleep(10)
-          switch_map = None
-          olt_configured = False
-          try:
-                switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-                if not switch_map:
-                      log_test.info('No voltha devices found')
-                      return
-                log_test.info('Installing OLT app')
-                OnosCtrl.install_app(self.olt_app_file)
-                time.sleep(5)
-                log_test.info('Adding subscribers through OLT app')
-                self.config_olt(switch_map)
-                olt_configured = True
-                time.sleep(5)
-                self.num_subscribers = num_subscribers
-                self.num_channels = num_channels
-                test_status = self.subscriber_flows_check(num_subscribers = self.num_subscribers,
-                                                          num_channels = self.num_channels,
-                                                          cbs = cbs,
-                                                          port_list = self.generate_port_list(self.num_subscribers,
-                                                                                              self.num_channels),
-                                                          src_list = src_list, services = services)
-                if test_status is False:
-                   self.success = False
-                assert_equal(test_status, True)
-          finally:
-                if switch_map is not None:
-                      if olt_configured is True:
-                            self.remove_olt(switch_map)
-                      voltha.disable_device(device_id, delete = True)
-                      time.sleep(10)
-                      log_test.info('Uninstalling OLT app')
-                      OnosCtrl.uninstall_app(self.olt_app_name)
-
-    def subscriber_flows_check( self, num_subscribers = 1, num_channels = 1,
-                                  channel_start = 0, cbs = None, port_list = [], src_list = None,
-                                  services = None, negative_subscriber_auth = None):
-          self.test_status = False
-          self.ovs_cleanup()
-          subscribers_count = num_subscribers
-          sub_loop_count =  num_subscribers
-          if not port_list:
-             port_list = self.generate_port_list(num_subscribers, num_channels)
-          subscriber_tx_rx_ports = []
-          for i in range(num_subscribers):
-              subscriber_tx_rx_ports.append(Voltha_olt_subscribers(tx_port = self.port_map[port_list[i][0]],
-                                                                   rx_port = self.port_map[port_list[i][1]],
-                                                                   num_channels = num_channels,src_list = src_list,))
-          self.onos_aaa_load()
-          #load the ssm list for all subscriber channels
-          igmpChannel = IgmpChannel(src_list = src_list)
-          ssm_groups = map(lambda sub: sub.channels, subscriber_tx_rx_ports)
-          ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
-          if src_list is None:
-             igmpChannel = IgmpChannel()
-             igmpChannel.igmp_load_ssm_config(ssm_list)
-          else:
-             igmpChannel = IgmpChannel(src_list = src_list)
-             igmpChannel.igmp_load_ssm_config(ssm_list, src_list= src_list)
-
-          self.thread_pool = ThreadPool(min(100, subscribers_count), queue_size=1, wait_timeout=1)
-
-          chan_leave = False #for single channel, multiple subscribers
-          if cbs is None:
-                cbs = (self.tls_flow_check, self.dhcp_flow_check, self.igmp_flow_check)
-                chan_leave = True
-          for subscriber in subscriber_tx_rx_ports:
-                if 'IGMP' in services:
-#                   if src_list:
-#                      for i in range(len(src_list)):
-#                          subscriber.start(src_ip = src_list[i])
-#                   else:
-#                      subscriber.start()
-                    subscriber.start()
-                sub_loop_count = sub_loop_count - 1
-                pool_object = voltha_subscriber_pool(subscriber, cbs)
-                self.thread_pool.addTask(pool_object.pool_cb)
-          self.thread_pool.cleanUpThreads()
-          for subscriber in subscriber_tx_rx_ports:
-                if services and 'IGMP' in services:
-#                  if src_list:
-#                     for i in range(len(src_list)):
-#                         subscriber.stop(src_ip = src_list[i])
-#                  else:
-#                     subscriber.stop()
-                   subscriber.stop()
-                if chan_leave is True:
-                      subscriber.channel_leave(0)
-          subscribers_count = 0
-          return self.test_status
-
-
-    def generate_port_list(self, subscribers, channels):
-        return self.port_list[:subscribers]
-
-    @classmethod
-    def ovs_cleanup(cls):
-            ##For every test case, delete all the OVS groups
-            cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
-            try:
-                  cord_test_shell(cmd)
-                  ##Since olt config is used for this test, we just fire a careless local cmd as well
-                  os.system(cmd)
-            finally:
-                  return
-
-    def test_olt_enable_disable(self):
-        log_test.info('Enabling OLT type %s, MAC %s' %(self.OLT_TYPE, self.OLT_MAC))
-        device_id, status = self.voltha.enable_device(self.OLT_TYPE, self.OLT_MAC)
-        assert_not_equal(device_id, None)
-        try:
-            assert_equal(status, True)
-            time.sleep(10)
-        finally:
-            self.voltha.disable_device(device_id, delete = True)
-
-    def test_ponsim_enable_disable(self):
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        try:
-            assert_equal(status, True)
-            time.sleep(10)
-        finally:
-            self.voltha.disable_device(device_id, delete = True)
-
-    def test_maple_enable_disable(self):
-        log_test.info('Enabling maple olt')
-        if self.VOLTHA_OLT_IP:
-              address = self.VOLTHA_OLT_IP
-              device_id, status = self.voltha.enable_device('maple_olt', address = address)
-              assert_not_equal(device_id, None)
-              try:
-                    assert_equal(status, True)
-                    time.sleep(10)
-              finally:
-                    self.voltha.disable_device(device_id, delete = True)
-
-    def test_subscriber_with_voltha_for_eap_tls_authentication(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  auth request packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls valid auth packets are being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber is authenticated successfully.
-        """
-        ret = voltha_setup(
-              host = self.VOLTHA_HOST,
-              ponsim_host = self.VOLTHA_PONSIM_HOST,
-              rest_port = self.VOLTHA_REST_PORT,
-              olt_type = 'ponsim_olt',
-              uplink_vlan_map = self.VOLTHA_UPLINK_VLAN_MAP,
-              uplink_vlan_start = self.VOLTHA_UPLINK_VLAN_START,
-              config_fake = self.VOLTHA_CONFIG_FAKE,
-              olt_app = self.olt_app_file)
-        assert_not_equal(ret, None)
-        voltha, device_id, switch_map, preconfigured = ret[0], ret[1], ret[2], ret[3]
-        if self.VOLTHA_TEARDOWN is False:
-              preconfigured = True
-        try:
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT)
-            assert_equal(auth_status, True)
-        finally:
-            if switch_map is not None and preconfigured is False:
-                if olt_configured is True:
-                    self.remove_olt(switch_map)
-                voltha_teardown(voltha, device_id, switch_map, olt_app = self.olt_app_file)
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_failure(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls without cert auth packet is being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_with_no_cert_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "no_cert")
-            try:
-                assert_equal(auth_status, True)
-                assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.remove_olt(switch_map)
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-
-        reactor.callLater(0, tls_flow_check_with_no_cert_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_using_invalid_cert(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets and exchange invalid cert from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls with invalid cert auth packet is being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_with_invalid_cert_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "invalid_cert")
-            try:
-                assert_equal(auth_status, True)
-                assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.remove_olt(switch_map)
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_with_invalid_cert_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_multiple_invalid_authentication_attempts(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets and exchange invalid cert from CORD TESTER voltha test module acting as a subscriber for multiple times.
-        4. Validate that eap tls with invalid cert auth packet is being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_with_no_cert_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "invalid_cert")
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "invalid_cert")
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "no_cert")
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT, cert_info = "invalid_cert")
-            try:
-                assert_equal(auth_status, True)
-                assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_with_no_cert_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_with_aaa_app_deactivation(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls without sending client hello, it's not being exchanged between client, onos and freeradius.
-        5. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_deactivating_app(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,"app_deactivate",))
-            thread2 = threading.Thread(target = self.deactivate_apps, args = (aaa_app,))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Restart aaa app in onos during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_deactivating_app, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_restarting_radius_server(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls with restart of radius server and packets are being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_restarting_radius(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,"restart_radius"))
-            thread2 = threading.Thread(target = cord_test_radius_restart)
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Restart radius server during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_restarting_radius, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_with_disabled_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Disable olt which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_operating_olt_state(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "disable_olt_device",))
-            thread2 = threading.Thread(target = self.voltha.disable_device, args = (device_id, False,))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Disable the ponsim olt device during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_operating_olt_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_disabling_uni_port(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Disable uni port which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_operating_olt_state(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "uni_port_admin_down",))
-            thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_operating_olt_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT +600)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_carrying_out_multiple_times_toggling_of_uni_port(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Disable uni port which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        10. Repeat steps from 3 to 9 for 10 times and finally verify tls flow
-
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        def tls_flow_check_with_disable_olt_device_scenario(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "uni_port_admin_down",))
-                thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-                thread1.start()
-                time.sleep(randint(1,2))
-                log_test.info('Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha')
-                thread2.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            time.sleep(60)
-            cord_test_radius_restart()
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT)
-            try:
-        #        assert_equal(status, True)
-                assert_equal(auth_status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_with_disable_olt_device_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_restarting_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Restart olt which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_operating_olt_state(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "restart_olt_device",))
-            thread2 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Restart the ponsim olt device during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_operating_olt_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_performing_multiple_times_restart_of_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Restart olt which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        10. Repeat steps from 3 to 9 for 10 times and finally verify tls flow
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        def tls_flow_check_with_disable_olt_device_scenario(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "restart_olt_device",))
-                thread2 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-                thread1.start()
-                time.sleep(randint(1,2))
-                log_test.info('Restart the ponsim olt device during tls auth flow check on voltha')
-                thread2.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            time.sleep(60)
-            cord_test_radius_restart()
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT)
-            try:
-        #        assert_equal(status, True)
-                assert_equal(auth_status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_with_disable_olt_device_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_restarting_onu(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Restart onu which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        """
-        df = defer.Deferred()
-        def tls_flow_check_operating_onu_state(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "restart_onu_device",))
-            thread2 = threading.Thread(target = self.voltha.restart_device, args = (onu_device_id,))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Restart the ponsim oon device during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_operating_onu_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_eap_tls_authentication_performing_multiple_times_restart_of_onu(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Restart onu which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        10. Repeat steps from 3 to 9 for 10 times and finally verify tls flow
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        def tls_flow_check_operating_olt_state(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT, "restart_onu_device",))
-                thread2 = threading.Thread(target = self.voltha.restart_device, args = (onu_device_id,))
-                thread1.start()
-                time.sleep(randint(1,2))
-                log_test.info('Restart the ponsim oon device during tls auth flow check on voltha')
-                thread2.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            time.sleep(60)
-            cord_test_radius_restart()
-            auth_status = self.tls_flow_check(self.INTF_RX_DEFAULT)
-            try:
-        #        assert_equal(status, True)
-                assert_equal(auth_status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_operating_olt_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_eap_tls_authentication(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT is detected and ONU ports(nni and 2 uni's) are being seen.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Bring up two Residential subscribers from cord-tester and issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls valid auth packets are being exchanged between two subscriber, onos and freeradius.
-        5. Verify that two subscribers are authenticated successfully.
-        """
-
-        df = defer.Deferred()
-        def tls_flow_check_on_two_subscribers_same_olt_device(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Initiating tls auth packets from one more subscriber on same olt device which is deteced on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_on_two_subscribers_same_olt_device, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_eap_tls_authentication_using_same_certificates(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT is detected and ONU ports(nni and 2 uni's) are being seen.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Bring up two Residential subscribers from cord-tester and issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that two valid certificates are being exchanged between two subscriber, onos and freeradius.
-        5. Verify that two subscribers are not authenticated.
-        """
-        df = defer.Deferred()
-        def tls_flow_check_on_two_subscribers_same_olt_device(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_2_RX_DEFAULT, "same_cert",))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Initiating tls auth packets from one more subscriber on same olt device which is deteced on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                 assert_equal(self.success, True)
-                 time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_on_two_subscribers_same_olt_device, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_eap_tls_authentication_initiating_invalid_tls_packets_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT is detected and ONU ports(nni and 2 uni's) are being seen.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Bring up two Residential subscribers from cord-tester and issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls valid auth packets are being exchanged between valid subscriber, onos and freeradius.
-        5. Validate that eap tls valid auth packets are being exchanged between invalid client, onos and freeradius.
-        6. Verify that valid subscriber authenticated successfully.
-        7. Verify that invalid subscriber are not authenticated successfully.
-        """
-
-        df = defer.Deferred()
-        def tls_flow_check_on_two_subscribers_same_olt_device(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_2_RX_DEFAULT, "no_cert",))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Initiating tls auth packets from one more subscriber on same olt device which is deteced on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                 assert_equal(self.success, True)
-                 time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_on_two_subscribers_same_olt_device, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_eap_tls_authentication_initiating_invalid_cert_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT is detected and ONU ports(nni and 2 uni's) are being seen.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Bring up two Residential subscribers from cord-tester and issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        4. Validate that eap tls valid auth packets are being exchanged between valid subscriber, onos and freeradius.
-        5. Validate that eap tls invalid cert auth packets are being exchanged between invalid subscriber, onos and freeradius.
-        6. Verify that valid subscriber authenticated successfully.
-        7. Verify that invalid subscriber are not authenticated successfully.
-        """
-
-        df = defer.Deferred()
-        def tls_flow_check_on_two_subscribers_same_olt_device(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_2_RX_DEFAULT, "invalid_cert",))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Initiating tls auth packets from one more subscriber on same olt device which is deteced on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_on_two_subscribers_same_olt_device, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_eap_tls_authentication_with_one_uni_port_disabled(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Bring up two Residential subscribers from cord-tester and issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        5. Validate that eap tls packets are being exchanged between two subscriber, onos and freeradius.
-        6. Verify that subscriber authenticated successfully.
-        7. Disable one of the uni port which is seen in voltha and issue tls auth packets from subscriber.
-        8. Validate that eap tls packets are not being exchanged between one subscriber, onos and freeradius.
-        9. Verify that subscriber authentication is unsuccessful..
-        10. Verify that other subscriber authenticated successfully.
-        """
-
-        df = defer.Deferred()
-        def tls_flow_check_on_two_subscribers_same_olt_device(df):
-            aaa_app = ["org.opencord.aaa"]
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            devices_list = self.voltha.get_devices()
-            log_test.info('All available devices on voltha = %s'%devices_list['items'])
-
-            onu_device_id = devices_list['items'][1]['id']
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            devices_list = self.voltha.get_devices()
-            thread1 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.tls_flow_check, args = (self.INTF_2_RX_DEFAULT, "uni_port_admin_down",))
-            thread1.start()
-            time.sleep(randint(1,2))
-            log_test.info('Initiating tls auth packets from one more subscriber on same olt device which is deteced on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-        #        assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-        reactor.callLater(0, tls_flow_check_on_two_subscribers_same_olt_device, df)
-        return df
-
-    def test_three_subscribers_with_voltha_for_eap_tls_authentication(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue auth request packets from CORD TESTER voltha test module acting as multipe subscribers (3 subscribers)
-        4. Validate that eap tls valid auth packets are being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber is authenticated successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 3
-        num_channels = 1
-        services = ('TLS')
-        cbs = (self.tls_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-        assert_equal(self.success, True)
-
-    def test_five_subscribers_with_voltha_for_eap_tls_authentication(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue auth request packets from CORD TESTER voltha test module acting as multipe subscribers (5 subscriber)
-        4. Validate that eap tls valid auth packets are being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber is authenticated successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 5
-        num_channels = 1
-        services = ('TLS')
-        cbs = (self.tls_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                      num_subscribers = num_subscribers,
-                                      num_channels = num_channels)
-        assert_equal(self.success, True)
-
-    def test_nine_subscribers_with_voltha_for_eap_tls_authentication(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Bring up freeradius server container using CORD TESTER and make sure that ONOS have connectivity to freeradius server.
-        3. Issue auth request packets from CORD TESTER voltha test module acting as multipe subscribers (9 subscriber)
-        4. Validate that eap tls valid auth packets are being exchanged between subscriber, onos and freeradius.
-        5. Verify that subscriber is authenticated successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 9
-        num_channels = 1
-        services = ('TLS')
-        cbs = (self.tls_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-        assert_equal(self.success, True)
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_request(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.remove_olt(switch_map)
-                self.voltha.disable_device(device_id, delete = True)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_request_with_invalid_broadcast_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac broadcast from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber should not get ip from dhcp server.
-        """
-
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "invalid_src_mac_broadcast")
-            try:
-                assert_equal(dhcp_status, True)
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_request_with_invalid_multicast_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac multicast from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber should not get ip from dhcp server.
-        """
-
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "invalid_src_mac_multicast")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_request_with_invalid_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac zero from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber should not get ip from dhcp server.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "invalid_src_mac_junk")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_request_and_release(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Send dhcp release from residential subscrber to dhcp server which is running as onos app.
-        6  Verify that subscriber should not get ip from dhcp server, ping to gateway.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "request_release")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_starvation_positive_scenario(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Repeat step 3 and 4 for 10 times.
-        6  Verify that subscriber should get ip from dhcp server.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "starvation_positive")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_starvation_negative_scenario(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber without of pool ip to dhcp server which is running as onos app.
-        4. Verify that subscriber should not get ip from dhcp server.
-        5. Repeat steps 3 and 4 for 10 times.
-        6  Verify that subscriber should not get ip from dhcp server.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "starvation_negative")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_sending_multiple_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Repeat step 3 for 50 times.
-        6  Verify that subscriber should get same ip which was received from 1st discover from dhcp server.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "multiple_discover")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_sending_multiple_request(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Send DHCP request to dhcp server which is running as onos app.
-        6. Repeat step 5 for 50 times.
-        7. Verify that subscriber should get same ip which was received from 1st discover from dhcp server.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "multiple_requests")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_requesting_desired_ip_address(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired ip address from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip which was requested in step 3 from dhcp server successfully.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "desired_ip_address")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_requesting_desired_out_of_pool_ip_address(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired out of pool ip address from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber should not get ip which was requested in step 3 from dhcp server, and its offered only within dhcp pool of ip.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "desired_out_of_pool_ip_address")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_deactivating_dhcp_app_in_onos(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Deactivate dhcp server app in onos.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        """
-        df = defer.Deferred()
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-            thread2 = threading.Thread(target = self.deactivate_apps, args = (dhcp_app,))
-            log_test.info('Restart dhcp app in onos during client send discover to voltha')
-            thread2.start()
-            thread1.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_renew_time(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Send dhcp renew packet to dhcp server which is running as onos app.
-        6. Repeat step 4.
-        """
-
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "dhcp_renew")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_rebind_time(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Send dhcp rebind packet to dhcp server which is running as onos app.
-        6. Repeat step 4.
-        """
-        df = defer.Deferred()
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT, "dhcp_rebind")
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_toggling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        """
-        df = defer.Deferred()
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-            thread2 = threading.Thread(target = self.voltha.disable_device, args = (device_id,False,))
-            log_test.info('Disable the olt device in during client send discover to voltha')
-            thread2.start()
-#            time.sleep(randint(0,1))
-            thread1.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_with_multiple_times_disabling_of_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        8. Repeat steps from 3 to 7 for 10 times and finally verify dhcp flow
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-                thread2 = threading.Thread(target = self.voltha.disable_device, args = (device_id,False,))
-                log_test.info('Disable the olt device in during client send discover to voltha')
-                thread2.start()
-#            time.sleep(randint(0,1))
-                thread1.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            try:
-                assert_equal(self.success, True)
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_toggling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        8. Enable olt devices which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-        df = defer.Deferred()
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-            thread2 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-            thread2.start()
-            thread1.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_toggling_olt_multiple_times(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        8. Enable olt devices which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-
-        df = defer.Deferred()
-        no_iterations = 10
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-                thread2 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-                thread2.start()
-                thread1.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            try:
-                assert_equal(dhcp_status, True)
-                #assert_equal(status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_disabling_onu_port(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        """
-        df = defer.Deferred()
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-            thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_disabling_onu_port_multiple_times(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-                thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-                thread1.start()
-                thread2.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            try:
-                #assert_equal(status, True)
-                assert_equal(dhcp_status, True)
-                assert_equal(self.success, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_toggling_onu_port(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        8. Enable onu port which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-
-        df = defer.Deferred()
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-            thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-            log_test.info('Restart dhcp app in onos during client send discover to voltha')
-            thread2.start()
-            time.sleep(randint(0,1))
-            thread1.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            assert_equal(dhcp_status, True)
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcp_toggling_onu_port_multiple_times(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from dhcp server, and ping to gateway.
-        8. Enable onu port which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-
-        df = defer.Deferred()
-        no_iterations = 10
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            for i in range(no_iterations):
-                thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT, "interrupting_dhcp_flows",))
-                thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-                log_test.info('Restart dhcp app in onos during client send discover to voltha')
-                thread2.start()
-                time.sleep(randint(0,1))
-                thread1.start()
-                time.sleep(10)
-                thread1.join()
-                thread2.join()
-            dhcp_status = self.dhcp_flow_check(self.INTF_RX_DEFAULT)
-            assert_equal(dhcp_status, True)
-            try:
-                assert_equal(self.success, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got different ips from dhcp server successfully.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-#                if self.success is not True:
-                assert_equal(dhcp_flow_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_multiple_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Repeat step 3 and 4 for 10 times for both subscribers.
-        6  Verify that subscribers should get same ips which are offered the first time from dhcp server.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,"multiple_discover",))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,"multiple_discover",))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-#                if self.success is not True:
-                assert_equal(dhcp_flow_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_and_with_multiple_discover_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Repeat step 3 and 4 for 10 times for only one subscriber and ping to gateway from other subscriber.
-        6  Verify that subscriber should get same ip which is offered the first time from dhcp server and other subscriber ping to gateway should not failed
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,"multiple_discover",))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-#                if self.success is not True:
-                assert_equal(dhcp_flow_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_discover_and_desired_ip_address_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from one residential subscriber to dhcp server which is running as onos app.
-        3. Send dhcp request with desired ip from other residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscribers had got different ips (one subscriber desired ip and other subscriber random ip) from dhcp server successfully.
-        """
-
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,"desired_ip_address",))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-#                if self.success is not True:
-                assert_equal(dhcp_flow_status, True)
-                #assert_equal(status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_discover_within_and_without_dhcp_pool_ip_addresses(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired wihtin dhcp pool ip from one residential subscriber to dhcp server which is running as onos app.
-        3. Send dhcp request with desired without in dhcp pool ip from other residential subscriber to dhcp server which is running as onos app.
-        4. Verify that subscribers had got different ips (both subscriber got random ips within dhcp pool) from dhcp server successfully.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,"desired_ip_address",))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,"desired_out_of_pool_ip_address",))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_disabling_onu_port_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Disable onu port on which access one subscriber and ping to gateway from other subscriber.
-        6. Repeat step 3 and 4 for one subscriber where uni port is down.
-        7. Verify that subscriber should not get ip from dhcp server and other subscriber ping to gateway should not failed.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,"desired_ip_address",))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,"desired_out_of_pool_ip_address",))
-            thread1.start()
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_toggling_onu_port_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Disable onu port on which access one subscriber and ping to gateway from other subscriber.
-        6. Repeat step 3 and 4 for one subscriber where uni port is down.
-        7. Verify that subscriber should not get ip from dhcp server and other subscriber ping to gateway should not failed.
-        8. Enable onu port on which was disable at step 5 and ping to gateway from other subscriber.
-        9. Repeat step 3 and 4 for one subscriber where uni port is up now.
-        10. Verify that subscriber should get ip from dhcp server and other subscriber ping to gateway should not failed.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread3 = threading.Thread(target = self.voltha_uni_port_toggle, args = (self.INTF_2_RX_DEFAULT,))
-            thread1.start()
-            thread2.start()
-            thread3.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            thread3.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_disabling_olt(self):
-        """
-        Test Method: uni_port
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Disable the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from dhcp server and other subscriber ping to gateway should failed.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread3 = threading.Thread(target = self.voltha.disable_device, args = (device_id,False,))
-
-            thread1.start()
-            thread2.start()
-            thread3.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            thread3.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_toggling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Disable the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from dhcp server and other subscriber ping to gateway should failed.
-        8. Enable the olt device which is detected in voltha.
-        9. Verify that subscriber should get ip from dhcp server and other subscriber ping to gateway should not failed.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread3 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-            thread1.start()
-            thread2.start()
-            thread3.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            thread3.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_dhcp_with_paused_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to dhcp server which is running as onos app.
-        4. Verify that subscribers had got ip from dhcp server successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Pause the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from dhcp server and other subscriber ping to gateway should failed.
-        """
-        df = defer.Deferred()
-        self.success = True
-        dhcp_app =  'org.onosproject.dhcp'
-        def dhcp_flow_check_scenario(df):
-            log_test.info('Enabling ponsim_olt')
-            ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-            device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-            assert_not_equal(device_id, None)
-            voltha = VolthaCtrl(**self.voltha_attrs)
-            time.sleep(10)
-            switch_map = None
-            olt_configured = False
-            switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-            log_test.info('Installing OLT app')
-            OnosCtrl.install_app(self.olt_app_file)
-            time.sleep(5)
-            log_test.info('Adding subscribers through OLT app')
-            self.config_olt(switch_map)
-            olt_configured = True
-            time.sleep(5)
-            thread1 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_RX_DEFAULT,))
-            thread2 = threading.Thread(target = self.dhcp_flow_check, args = (self.INTF_2_RX_DEFAULT,))
-            thread3 = threading.Thread(target = self.voltha.pause_device, args = (device_id,))
-            thread1.start()
-            thread2.start()
-            thread3.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            thread3.join()
-            dhcp_flow_status = self.success
-            try:
-                assert_equal(dhcp_flow_status, True)
-                time.sleep(10)
-            finally:
-                self.voltha.disable_device(device_id, delete = True)
-                self.remove_olt(switch_map)
-            df.callback(0)
-
-        reactor.callLater(0, dhcp_flow_check_scenario, df)
-        return df
-
-    def test_three_subscribers_with_voltha_for_dhcp_discover_requests(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (3 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 3
-        num_channels = 1
-        services = ('DHCP')
-        cbs = (self.dhcp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_five_subscribers_with_voltha_for_dhcp_discover_requests(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (5 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 5
-        num_channels = 1
-        services = ('DHCP')
-        cbs = (self.dhcp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_nine_subscribers_with_voltha_for_dhcp_discover_requests(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (9 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 9 subscribers browsing 1 channels each"""
-        num_subscribers = 9
-        num_channels = 1
-        services = ('DHCP')
-        cbs = (self.dhcp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_three_subscribers_with_voltha_for_tls_auth_and_dhcp_discover_flows(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (3 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 3
-        num_channels = 1
-        services = ('TLS','DHCP')
-        cbs = (self.tls_flow_check, self.dhcp_flow_check, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_five_subscribers_with_voltha_for_tls_auth_and_dhcp_discover_flows(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (5 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 5
-        num_channels = 1
-        services = ('TLS','DHCP')
-        cbs = (self.tls_flow_check, self.dhcp_flow_check, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_nine_subscribers_with_voltha_for_tls_auth_and_dhcp_discover_flows(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as multiple subscribers (9 subscribers)
-        3. Send dhcp request from residential subscrber to dhcp server which is running as onos app.
-        4. Verify that subscriber get ip from dhcp server successfully.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 9
-        num_channels = 1
-        services = ('TLS','DHCP')
-        cbs = (self.tls_flow_check, self.dhcp_flow_check, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-#    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcprelay_request(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscrber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server successfully.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        try:
-           self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-           self.send_recv(mac=mac)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcprelay_request_with_invalid_broadcast_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac broadcast from residential subscrber to external dhcp server.
-        4. Verify that subscriber should not get ip from external dhcp server.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
-        try:
-            assert_equal(cip,None)
-            log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-        finally:
-            self.voltha.disable_device(device_id, delete = True)
-            self.voltha_dhcprelay_tearDownClass()
-
-#    @deferred(TESTCASE_TIMEOUT)
-    def test_subscriber_with_voltha_for_dhcprelay_request_with_invalid_multicast_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac multicast from residential subscrber to external dhcp server.
-        4. Verify that subscriber should not get ip from external dhcp server.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:01:98:05')
-        try:
-           assert_equal(cip,None)
-           log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_request_with_invalid_source_mac(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with invalid source mac zero from residential subscrber to external dhcp server.
-        4. Verify that subscriber should not get ip from external dhcp server.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
-        try:
-           assert_equal(cip,None)
-           log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_request_and_release(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscrber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server successfully.
-        5. Send dhcp release from residential subscrber to external dhcp server.
-        6  Verify that subscriber should not get ip from external dhcp server, ping to gateway.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
-        cip, sip = self.send_recv(mac=mac)
-        log_test.info('Releasing ip %s to server %s' %(cip, sip))
-        try:
-           assert_equal(self.dhcp.release(cip), True)
-           log_test.info('Triggering DHCP discover again after release')
-           cip2, sip2 = self.send_recv(mac=mac)
-           log_test.info('Verifying released IP was given back on rediscover')
-           assert_equal(cip, cip2)
-           log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
-           assert_equal(self.dhcp.release(cip2), True)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    ##Not yet validated
-    def test_subscriber_with_voltha_for_dhcprelay_starvation(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Repeat step 3 and 4 for 10 times.
-        6  Verify that subscriber should get ip from external dhcp server..
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        #self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        log_test.info('Verifying 1 ')
-        count = 0
-        while True:
-            #mac = RandMAC()._fix()
-            cip, sip = self.send_recv(mac=mac,update_seed = True,validate = False)
-            if cip is None:
-                break
-            else:
-                count += 1
-        assert_equal(count,91)
-        log_test.info('Verifying 2 ')
-        cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
-        try:
-           assert_equal(cip, None)
-           assert_equal(sip, None)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_starvation_negative_scenario(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber without of pool ip to external dhcp server.
-        4. Verify that subscriber should not get ip from external dhcp server..
-        5. Repeat steps 3 and 4 for 10 times.
-        6  Verify that subscriber should not get ip from external dhcp server..
-        """
-    def test_subscriber_with_voltha_for_dhcprelay_sending_multiple_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Repeat step 3 for 50 times.
-        6  Verify that subscriber should get same ip which was received from 1st discover from external dhcp server..
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCP REQUEST.' %
-                  (cip, sip, mac) )
-        try:
-           assert_not_equal(cip, None)
-           log_test.info('Triggering DHCP discover again.')
-           new_cip, new_sip, new_mac, _ = self.dhcp.only_discover(mac=mac)
-           assert_equal(new_cip, cip)
-           log_test.info('Got same ip to same the client when sent discover again, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_sending_multiple_requests(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Send DHCP request to external dhcp server.
-        6. Repeat step 5 for 50 times.
-        7. Verify that subscriber should get same ip which was received from 1st discover from external dhcp server..
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        log_test.info('Sending DHCP discover and DHCP request.')
-        cip, sip = self.send_recv(mac=mac)
-        mac = self.dhcp.get_mac(cip)[0]
-        log_test.info("Sending DHCP request again.")
-        new_cip, new_sip = self.dhcp.only_request(cip, mac)
-        try:
-           assert_equal(new_cip, cip)
-           log_test.info('got same ip to smae the client when sent request again, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_requesting_desired_ip_address(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired ip address from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip which was requested in step 3 from external dhcp server. successfully.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '192.168.1.31', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac,desired = True)
-        try:
-           assert_equal(cip,self.dhcp.seed_ip)
-           log_test.info('Got dhcp client desired IP %s from server %s for mac %s as expected' %
-                  (cip, sip, mac) )
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_requesting_desired_out_of_pool_ip_address(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired out of pool ip address from residential subscriber to external dhcp server.
-        4. Verify that subscriber should not get ip which was requested in step 3 from external dhcp server., and its offered only within dhcp pool of ip.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac= mac,desired = True)
-        try:
-           assert_not_equal(cip,None)
-           assert_not_equal(cip,self.dhcp.seed_ip)
-           log_test.info('server offered IP from its pool when requested out of pool IP, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_deactivating_dhcprelay_app_in_onos(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Deactivate dhcp server app in onos.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from external dhcp server., and ping to gateway.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac,))
-        thread2 = threading.Thread(target = self.deactivate_apps, args = (self.app_dhcprelay,))
-        log_test.info('Restart dhcprelay app in onos during client send discover to voltha')
-        thread2.start()
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_renew_time(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Send dhcp renew packet to external dhcp server.
-        6. Repeat step 4.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        new_options = [('dhcp-renewal-time', 100), ('dhcp-rebinding-time', 125)]
-        options = self.default_options + new_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-        try:
-           assert_not_equal(cip,None)
-           new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
-           log_test.info('Waiting for  renew  time..')
-           time.sleep(lval)
-           latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
-           assert_equal(latest_cip, cip)
-           log_test.info('Server renewed client IP when client sends request after renew time, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_rebind_time(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Send dhcp rebind packet to external dhcp server.
-        6. Repeat step 4.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        new_options = [('dhcp-renewal-time', 100), ('dhcp-rebinding-time', 125)]
-        options = self.default_options + new_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_PONSIM_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
-        cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
-        log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
-                  (cip, sip, mac) )
-        try:
-           assert_not_equal(cip,None)
-           new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
-           log_test.info('Waiting for  rebind  time..')
-           time.sleep(lval)
-           latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
-           assert_equal(latest_cip, cip)
-           log_test.info('Server renewed client IP when client sends request after rebind time, as expected')
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_disabling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from external dhcp server., and ping to gateway.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac,))
-        thread2 = threading.Thread(target = self.voltha.disable_device, args = (device_id,False,))
-        log_test.info('Disable olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-
-    def test_subscriber_with_voltha_for_dhcprelay_toggling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Disable olt devices which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from external dhcp server., and ping to gateway.
-        8. Enable olt devices which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac,))
-        thread2 = threading.Thread(target = self.voltha.restart_device, args = (device_id,))
-        log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-
-    def test_subscriber_with_voltha_for_dhcprelay_disable_onu_port_in_voltha(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from external dhcp server., and ping to gateway.
-        """
-
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac,))
-        thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-        log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_subscriber_with_voltha_for_dhcprelay_toggle_onu_port_in_voltha(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from residential subscriber to external dhcp server.
-        4. Verify that subscriber get ip from external dhcp server. successfully.
-        5. Disable onu port which is being detected in voltha CLI.
-        6. Repeat step 3.
-        7. Verify that subscriber should not get ip from external dhcp server., and ping to gateway.
-        8. Enable onu port which is being detected in voltha CLI.
-        9. Repeat steps 3 and 4.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface = self.port_map[self.port_list[0][1]]
-        mac = self.get_mac(iface)
-        self.host_load(iface)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac,))
-        thread2 = threading.Thread(target = self.voltha_uni_port_toggle)
-        log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-           cip, sip = self.send_recv(mac=mac)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got different ips from external dhcp server. successfully.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-        self.dhcp_1 = DHCPTest(seed_ip = '10.10.10.1', iface = iface_1)
-        self.dhcp_2 = DHCPTest(seed_ip = '20.20.20.1', iface = iface_2)
-        thread1 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac_1,False,True,self.dhcp_1,))
-        thread2 = threading.Thread(target = self.send_recv_function_in_thread, args = (mac_2,False,True,self.dhcp_2,))
-#        log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, True)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_multiple_discover(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Repeat step 3 and 4 for 10 times for both subscribers.
-        6  Verify that subscribers should get same ips which are offered the first time from external dhcp server..
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-        thread1 = threading.Thread(target = self.dhcprelay_flow_check, args = ('10.10.10.1', iface_1, mac_1,'multiple_discover',))
-        thread2 = threading.Thread(target = self.dhcprelay_flow_check, args = ('20.20.20.1', iface_2, mac_2,'multiple_discover',))
-        #log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, True)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_multiple_discover_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Repeat step 3 and 4 for 10 times for only one subscriber and ping to gateway from other subscriber.
-        6  Verify that subscriber should get same ip which is offered the first time from external dhcp server. and other subscriber ping to gateway should not failed
-        """
-
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-        thread1 = threading.Thread(target = self.dhcprelay_flow_check, args = ('10.10.10.1', iface_1, mac_1,))
-        thread2 = threading.Thread(target = self.dhcprelay_flow_check, args = ('20.20.20.1', iface_2, mac_2,'multiple_discover',))
-        #log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, True)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_discover_desired_ip_address_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from one residential subscriber to external dhcp server.
-        3. Send dhcp request with desired ip from other residential subscriber to external dhcp server.
-        4. Verify that subscribers had got different ips (one subscriber desired ip and other subscriber random ip) from external dhcp server. successfully.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-        thread1 = threading.Thread(target = self.dhcprelay_flow_check, args = ('10.10.10.1', iface_1, mac_1,))
-        thread2 = threading.Thread(target = self.dhcprelay_flow_check, args = ('20.20.20.1', iface_2, mac_2,'desired_ip',))
-        #log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, True)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_discover_for_in_range_and_out_of_range_from_dhcp_pool_ip_addresses(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request with desired wihtin dhcp pool ip from one residential subscriber to external dhcp server.
-        3. Send dhcp request with desired without in dhcp pool ip from other residential subscriber to external dhcp server.
-        4. Verify that subscribers had got different ips (both subscriber got random ips within dhcp pool) from external dhcp server. successfully.
-        """
-
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-        thread1 = threading.Thread(target = self.dhcprelay_flow_check, args = ('10.10.10.1', iface_1, mac_1,))
-        thread2 = threading.Thread(target = self.dhcprelay_flow_check, args = ('20.20.20.1', iface_2, mac_2,'out_of_pool_ip',))
-        #log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(4)
-        thread1.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        try:
-           assert_equal(self.success, True)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_disabling_onu_port_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Disable onu port on which access one subscriber and ping to gateway from other subscriber.
-        6. Repeat step 3 and 4 for one subscriber where uni port is down.
-        7. Verify that subscriber should not get ip from external dhcp server. and other subscriber ping to gateway should not failed.
-        """
-        self.voltha_dhcprelay_setUpClass()
-#       if not port_list:
-#        port_list = self.generate_port_list(1, 0)
-        iface_1 = self.port_map[self.port_list[0][1]]
-        iface_2 = self.port_map[self.port_list[3][1]]
-        mac_1 = self.get_mac(iface_1)
-        mac_2 = self.get_mac(iface_2)
-        self.host_load(iface_1)
-        self.host_load(iface_2)
-        ##we use the defaults for this test that serves as an example for others
-        ##You don't need to restart dhcpd server if retaining default config
-        config = self.default_config
-        options = self.default_options
-        subnet = self.default_subnet_config
-        dhcpd_interface_list = self.relay_interfaces
-        log_test.info('Enabling ponsim_olt')
-        ponsim_address = '{}:50060'.format(self.VOLTHA_HOST)
-        device_id, status = self.voltha.enable_device('ponsim_olt', address = ponsim_address)
-        assert_not_equal(device_id, None)
-        voltha = VolthaCtrl(**self.voltha_attrs)
-        time.sleep(10)
-        switch_map = None
-        olt_configured = False
-        switch_map = voltha.config(fake = self.VOLTHA_CONFIG_FAKE)
-        log_test.info('Installing OLT app')
-        OnosCtrl.install_app(self.olt_app_file)
-        time.sleep(5)
-        log_test.info('Adding subscribers through OLT app')
-        self.config_olt(switch_map)
-        olt_configured = True
-        time.sleep(5)
-        self.dhcpd_start(intf_list = dhcpd_interface_list,
-                         config = config,
-                         options = options,
-                         subnet = subnet)
-
-        thread1 = threading.Thread(target = self.dhcprelay_flow_check, args = ('10.10.10.1', iface_1, mac_1,))
-        thread2 = threading.Thread(target = self.dhcprelay_flow_check, args = ('20.20.20.1', iface_2, mac_2,'desired_ip',))
-        thread3 = threading.Thread(target = self.voltha_uni_port_toggle, args = (iface_2,))
-        #log_test.info('Restart olt devices during client send discover to voltha')
-        thread2.start()
-        time.sleep(8)
-        thread1.start()
-        thread3.start()
-        time.sleep(10)
-        thread1.join()
-        thread2.join()
-        thread3.join()
-        try:
-           assert_equal(self.success, False)
-           #assert_equal(status, True)
-           time.sleep(10)
-        finally:
-           self.voltha.disable_device(device_id, delete = True)
-           self.voltha_dhcprelay_tearDownClass()
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_toggling_onu_port_for_one_subscriber(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Disable onu port on which access one subscriber and ping to gateway from other subscriber.
-        6. Repeat step 3 and 4 for one subscriber where uni port is down.
-        7. Verify that subscriber should not get ip from external dhcp server. and other subscriber ping to gateway should not failed.
-        8. Enable onu port on which was disable at step 5 and ping to gateway from other subscriber.
-        9. Repeat step 3 and 4 for one subscriber where uni port is up now.
-        10. Verify that subscriber should get ip from external dhcp server. and other subscriber ping to gateway should not failed.
-        """
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_disabling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Disable the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from external dhcp server. and other subscriber ping to gateway should failed.
-        """
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_toggling_olt(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Disable the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from external dhcp server. and other subscriber ping to gateway should failed.
-        8. Enable the olt device which is detected in voltha.
-        9. Verify that subscriber should get ip from external dhcp server. and other subscriber ping to gateway should not failed.
-        """
-
-    def test_two_subscribers_with_voltha_for_dhcprelay_with_paused_olt_detected(self):
-        """
-        Test Method:
-        0. Make sure that voltha and external dhcp server are up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue  tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Send dhcp request from two residential subscribers to external dhcp server.
-        4. Verify that subscribers had got ip from external dhcp server. successfully.
-        5. Start pinging continuously from one subscriber and repeat steps 3 and 4 for other subscriber.
-        6. Pause the olt device which is detected in voltha.
-        7. Verify that subscriber should not get ip from external dhcp server. and other subscriber ping to gateway should failed.
-        """
-
-    def test_subscriber_with_voltha_for_igmp_join_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA.
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        """
-
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_leave_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA.
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port on ONU.
-        6. Verify that multicast data packets are being recieved on join received uni port on ONU to cord-tester.
-        7. Send igmp leave for a multicast group address multi-group-addressA.
-        8. Verify that multicast data packets are not being recieved on leave sent uni port on ONU to cord-tester.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_leave_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_leave_and_again_join_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA.
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port on ONU.
-        6. Verify that multicast data packets are being recieved on join received uni port on ONU to cord-tester.
-        7. Send igmp leave for a multicast group address multi-group-addressA.
-        8. Verify that multicast data packets are not being recieved on leave sent uni port on ONU to cord-tester.
-        9. Repeat steps 4 to 6.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_leave_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_with_five_groups_joins_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for multicast group addresses multi-group-addressA,multi-group-addressB
-        5. Send multicast data traffic for two groups (multi-group-addressA and multi-group-addressB) from other uni port on ONU.
-        6. Verify that 2 groups multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 1
-        num_channels = 5
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_with_five_groups_joins_and_leave_for_one_group_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for multicast group addresses multi-group-addressA,multi-group-addressB
-        5. Send multicast data traffic for two groups (multi-group-addressA and multi-group-addressB) from other uni port on ONU.
-        6. Verify that 2 groups multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send igmp leave for a multicast group address multi-group-addressA.
-        8. Verify that multicast data packets of group(multi-group-addressA) are not being recieved on leave sent uni port on ONU to cord-tester.
-        9. Verify that multicast data packets of group (multi-group-addressB) are being recieved on join sent uni port on ONU to cord-tester.
-        """
-        """Test subscriber join next for channel surfing with 3 subscribers browsing 3 channels each"""
-        num_subscribers = 1
-        num_channels = 5
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_join_different_group_src_list_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listB on ONU.
-        8. Verify that multicast data packets are not being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['2.3.4.5','3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_change_to_exclude_mcast_group_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send igmp joins for a multicast group address multi-group-addressA with exclude source list src_listA
-        8. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        9. Verify that multicast data packets are not being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_exclude, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['2.3.4.5','3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_change_to_include_back_from_exclude_mcast_group_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source exclude list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are not being recieved on join sent uni port on ONU to cord-tester.
-        7. Send igmp joins for a multicast group address multi-group-addressA with allow source list src_listA
-        8. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        9. Verify that multicast data packets are being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_exclude_again_include_back, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['2.3.4.5','3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_change_to_block_src_list_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send igmp joins for a multicast group address multi-group-addressA with block source list src_listA
-        8. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        9. Verify that multicast data packets are not being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_block, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['2.3.4.5','3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_allow_new_src_list_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source exclude list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send igmp joins for a multicast group address multi-group-addressA with allow new source list src_listB
-        8. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listB on ONU.
-        9. Verify that multicast data packets are being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_block_again_allow_back, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['2.3.4.5','3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscriber_with_voltha_for_igmp_group_include_empty_src_list_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source exclude list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are not being recieved on join sent uni port on ONU to cord-tester.
-        7. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listB on ONU.
-        8. Verify that multicast data packets are not being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_group_include_source_empty_list, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['0'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_subscribers_with_voltha_for_igmp_group_exclude_empty_src_list_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA with source exclude list src_listA
-        5. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listA on ONU.
-        6. Verify that multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        7. Send multicast data traffic for a group (multi-group-addressA) from other uni port with source ip as src_listB on ONU.
-        8. Verify that multicast data packets are being recieved on join sent uni port on ONU from other source list to cord-tester.
-        """
-
-        num_subscribers = 1
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_group_exclude_source_empty_list, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['0'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_two_subscribers_with_voltha_for_igmp_join_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressB from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-
-        num_subscribers = 2
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['1.2.3.4'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_two_subscribers_with_voltha_for_igmp_join_leave_for_one_subscriber_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Send igmp leave for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        10. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-        num_subscribers = 2
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_exclude, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['1.2.3.4','2.3.4.5'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_two_subscribers_with_voltha_for_igmp_leave_join_for_one_subscriber_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp leave for a multicast group address multi-group-addressB from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast group adress (multi-group-addressA) data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast group adress (multi-group-addressB) data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Send igmp join for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        10. Verify that multicast of group (multi-group-addressA) data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast of group (multi-group-addressA) data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Verify that multicast of group (multi-group-addressB) data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-
-        num_subscribers = 2
-        num_channels = 1
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check_join_change_to_exclude_again_include_back, None, None)
-        self.voltha_subscribers(services, cbs = cbs, src_list = ['1.2.3.4', '3.4.5.6'],
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_with_uni_port_down_for_one_subscriber_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable uni_2 port which is being shown on voltha CLI.
-        10. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-        #rx_port = self.port_map['ports'][port_list[i][1]]
-        df = defer.Deferred()
-        def igmp_flow_check_operating_onu_admin_state(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check_during_olt_onu_operational_issues, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-	    thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread2 = threading.Thread(target = self.voltha_uni_port_toggle, args = (self.port_map['ports'][port_list[1][1]],))
-            thread1.start()
-            time.sleep(randint(40,50))
-            log_test.info('Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, False)
-                log_test.info('Igmp flow check expected to fail, hence ignore the test_status of igmp flow check')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_onu_admin_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_toggling_uni_port_for_one_subscriber_and_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable uni_2 port which is being shown on voltha CLI.
-        10. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Enable uni_2 port which we disable at step 9.
-        13. Repeat step 5,6 and 8.
-        """
-        df = defer.Deferred()
-        def igmp_flow_check_operating_onu_admin_state(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread2 = threading.Thread(target = self.voltha_uni_port_toggle, args = (self.port_map['ports'][port_list[1][1]],))
-            thread1.start()
-            time.sleep(randint(50,60))
-            log_test.info('Admin state of uni port is down and up after delay of 30 sec during tls auth flow check on voltha')
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                log_test.info('Igmp flow check expected to fail during UNI port down only, after UNI port is up it should be successful')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_onu_admin_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_disabling_olt_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable olt device which is being shown on voltha CLI.
-        10. Verify that multicast data packets are not being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-        df = defer.Deferred()
-        def igmp_flow_check_operating_olt_admin_disble(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check_during_olt_onu_operational_issues, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(50,60))
-            thread2 = threading.Thread(target = self.voltha.disable_device, args = (self.olt_device_id, False,))
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, False)
-                log_test.info('Igmp flow check expected to fail during olt device is disabled, so ignored test_status of this test')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_olt_admin_disble, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_pausing_olt_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Pause olt device which is being shown on voltha CLI.
-        10. Verify that multicast data packets are not being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        """
-        df = defer.Deferred()
-        def igmp_flow_check_operating_olt_admin_pause(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check_during_olt_onu_operational_issues, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(50,60))
-            thread2 = threading.Thread(target = self.voltha.pause_device, args = (self.olt_device_id,))
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, False)
-                log_test.info('Igmp flow check expected to fail during olt device is paused, so ignored test_status of this test')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_olt_admin_pause, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_toggling_olt_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable olt device which is being shown on voltha CLI.
-        10. Verify that multicast data packets are not being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Enable olt device which is disable at step 9.
-        13. Repeat steps 4,5, 7 and 8.
-        """
-        df = defer.Deferred()
-        def igmp_flow_check_operating_olt_admin_restart(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(50,60))
-            thread2 = threading.Thread(target = self.voltha.restart_device, args = (self.olt_device_id,))
-            thread2.start()
-            time.sleep(10)
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                log_test.info('Igmp flow check expected to fail during olt device restart, After OLT device is up, it should be successful')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_olt_admin_restart, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_multiple_times_disabling_olt_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable olt device which is being shown on voltha CLI.
-        10. Verify that multicast data packets are not being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Repeat steps  4 to 11 steps multiple times (example 20 times)
-        """
-        df = defer.Deferred()
-        no_iterations = 20
-        def igmp_flow_check_operating_olt_admin_disble(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(30,40))
-            for i in range(no_iterations):
-                thread2 = threading.Thread(target = self.voltha.disable_device, args = (self.olt_device_id, False,))
-                thread2.start()
-                time.sleep(8)
-                thread2.join()
-            thread1.join()
-            thread1.isAlive()
-            thread2.join()
-            try:
-                assert_equal(self.success, False)
-                log_test.info('Igmp flow check expected to fail during olt device is disabled, so ignored test_status of this test')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_olt_admin_disble, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT + 200)
-    def test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_uni_port_for_one_subscriber_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable uni_2 port which is being shown on voltha CLI.
-        10. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Enable uni_2 port which we disable at step 9.
-        13. Repeat step 5,6 and 8.
-        14. Repeat steps  4 to 13 steps multiple times (example 5 times)
-        """
-        df = defer.Deferred()
-        no_iterations = 5
-        def igmp_flow_check_operating_onu_admin_state(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(40,60))
-            for i in range(no_iterations):
-                thread2 = threading.Thread(target = self.voltha_uni_port_toggle, args = (self.port_map['ports'][port_list[1][1]],))
-                log_test.info('Admin state of uni port is down and up after delay of 30 sec during igmp flow check on voltha')
-                thread2.start()
-                time.sleep(1)
-                thread2.join()
-            thread1.isAlive()
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                log_test.info('Igmp flow check expected to fail during UNI port down only, after UNI port is up it should be successful')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_onu_admin_state, df)
-        return df
-
-    @deferred(TESTCASE_TIMEOUT)
-    def test_two_subscribers_with_voltha_for_igmp_multiple_times_toggling_olt_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue tls auth packets from CORD TESTER voltha test module acting as a subscriber..
-        3. Issue dhcp client packets to get IP address from dhcp server for a subscriber and check connectivity.
-        4. Send igmp joins for a multicast group address multi-group-addressA from one subscribers (uni_1 port)
-        5. Send igmp joins for a multicast group address multi-group-addressA from other subscribers ( uni_2 port)
-        6. Send multicast data traffic for a group (multi-group-addressA) from other uni_3 port on ONU.
-        7. Verify that multicast data packets are being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        8. Verify that multicast data packets are being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        9. Disable olt device which is being shown on voltha CLI.
-        10. Verify that multicast data packets are not being recieved on join sent uni (uni_1) port on ONU to cord-tester.
-        11. Verify that multicast data packets are not being recieved on join sent uni (uni_2) port on ONU to cord-tester.
-        12. Enable olt device which is disable at step 9.
-        13. Repeat steps 4,5, 7 and 8.
-        14. Repeat steps  4 to 13 steps multiple times (example 10 times)
-        """
-        df = defer.Deferred()
-        no_iterations = 10
-        def igmp_flow_check_operating_olt_admin_restart(df):
-            num_subscribers = 2
-            num_channels = 1
-            services = ('IGMP')
-            cbs = (self.igmp_flow_check, None, None)
-            port_list = self.generate_port_list(num_subscribers, num_channels)
-
-            thread1 = threading.Thread(target = self.voltha_subscribers, args = (services, cbs, 2, 1, ['1.2.3.4', '3.4.5.6'],))
-            thread1.start()
-            time.sleep(randint(50,60))
-            for i in range(no_iterations):
-                thread2 = threading.Thread(target = self.voltha.restart_device, args = (self.olt_device_id,))
-                thread2.start()
-                time.sleep(10)
-                thread2.join()
-            thread1.join()
-            thread2.join()
-            try:
-                assert_equal(self.success, True)
-                log_test.info('Igmp flow check expected to fail during olt device restart, after OLT device is up, it should be successful')
-                time.sleep(10)
-            finally:
-                pass
-            df.callback(0)
-        reactor.callLater(0, igmp_flow_check_operating_olt_admin_restart, df)
-        return df
-
-    def test_five_subscribers_with_voltha_for_igmp_with_ten_group_joins_verifying_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue multiple tls auth packets from CORD TESTER voltha test module acting as subscribers..
-        3. Issue multiple dhcp client packets to get IP address from dhcp server for as subscribers and check connectivity.
-        4. Send multiple igmp joins for 10 multicast group addresses multi-group-addressA,multi-group-addressB etc
-        5. Send multicast data traffic for two groups (multi-group-addressA and multi-group-addressB) from other uni port on ONU.
-        6. Verify that 2 groups multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        """
-
-        num_subscribers = 5
-        num_channels = 10
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
-
-    def test_nine_subscribers_with_voltha_for_igmp_with_ten_group_joins_and_verify_traffic(self):
-        """
-        Test Method:
-        0. Make sure that voltha is up and running on CORD-POD setup.
-        1. OLT and ONU is detected and validated.
-        2. Issue multiple tls auth packets from CORD TESTER voltha test module acting as subscribers..
-        3. Issue multiple dhcp client packets to get IP address from dhcp server for subscribers and check connectivity.
-        4. Send multiple igmp joins for 10 multicast group addresses multi-group-addressA,multi-group-addressB etc
-        5. Send multicast data traffic for two groups (multi-group-addressA and multi-group-addressB) from other uni port on ONU.
-        6. Verify that 2 groups multicast data packets are being recieved on join sent uni port on ONU to cord-tester.
-        """
-        num_subscribers = 9
-        num_channels = 10
-        services = ('IGMP')
-        cbs = (self.igmp_flow_check, None, None)
-        self.voltha_subscribers(services, cbs = cbs,
-                                    num_subscribers = num_subscribers,
-                                    num_channels = num_channels)
diff --git a/src/test/vrouter/__init__.py b/src/test/vrouter/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/vrouter/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/vrouter/vrouterTest.py b/src/test/vrouter/vrouterTest.py
deleted file mode 100644
index 7554ddb..0000000
--- a/src/test/vrouter/vrouterTest.py
+++ /dev/null
@@ -1,687 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-from nose.tools import *
-from scapy.all import *
-from CordTestUtils import get_mac, log_test
-from OnosCtrl import OnosCtrl
-from OltConfig import OltConfig
-from OnosFlowCtrl import OnosFlowCtrl
-from onosclidriver import OnosCliDriver
-#from quaggaclidriver import QuaggaCliDriver
-from CordContainer import Container, Onos, Quagga
-from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart, cord_test_quagga_stop, cord_test_quagga_shell
-from portmaps import g_subscriber_port_map
-from CordLogger import CordLogger
-import threading
-import time
-import os
-import json
-import pexpect
-
-#from cli import quagga
-#from quagga import *
-#from cli import requires
-#from cli import system
-#from generic import *
-
-log_test.setLevel('INFO')
-
-class vrouter_exchange(CordLogger):
-
-    apps = ('org.onosproject.proxyarp', 'org.onosproject.hostprovider', 'org.onosproject.vrouter', 'org.onosproject.fwd')
-    device_id = 'of:' + get_mac()
-    vrouter_device_dict = { "devices" : {
-                "{}".format(device_id) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-             },
-          }
-    zebra_conf = '''
-password zebra
-log stdout
-service advanced-vty
-!
-!debug zebra rib
-!debug zebra kernel
-!debug zebra fpm
-!
-!interface eth1
-! ip address 10.10.0.3/16
-line vty
- exec-timeout 0 0
-'''
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    quagga_config_path = os.path.join(test_path, '..', 'setup/quagga-config')
-    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
-    GATEWAY = '192.168.10.50'
-    INGRESS_PORT = 1
-    EGRESS_PORT = 2
-    MAX_PORTS = 100
-    peer_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
-    network_list = []
-    network_mask = 24
-    default_routes_address = ('11.10.10.0/24',)
-    default_peer_address = peer_list
-    quagga_ip = os.getenv('QUAGGA_IP')
-
-    @classmethod
-    def setUpClass(cls):
-        ''' Activate the vrouter apps'''
-        cls.olt = OltConfig()
-        cls.port_map, _ = cls.olt.olt_port_map()
-        if not cls.port_map:
-            cls.port_map = g_subscriber_port_map
-        time.sleep(3)
-        cls.load_device_id()
-
-    @classmethod
-    def tearDownClass(cls):
-        '''Deactivate the vrouter apps'''
-        #cls.vrouter_host_unload()
-        cls.start_onos(network_cfg = {})
-
-    @classmethod
-    def load_device_id(cls):
-        did = OnosCtrl.get_device_id()
-        cls.device_id = did
-        cls.vrouter_device_dict = { "devices" : {
-                "{}".format(did) : {
-                    "basic" : {
-                        "driver" : "softrouter"
-                    }
-                }
-            },
-        }
-
-    @classmethod
-    def activate_apps(cls, deactivate = False):
-        for app in cls.apps:
-            onos_ctrl = OnosCtrl(app)
-            if deactivate is False:
-                onos_ctrl.activate()
-            else:
-                onos_ctrl.deactivate()
-            time.sleep(2)
-
-    def cliEnter(self):
-        retries = 0
-        while retries < 3:
-            self.cli = OnosCliDriver(connect = True)
-            if self.cli.handle:
-                break
-            else:
-                retries += 1
-                time.sleep(2)
-
-    def cliExit(self):
-        self.cli.disconnect()
-
-    @classmethod
-    def onos_load_config(cls, config):
-        status, code = OnosCtrl.config(config)
-        if status is False:
-            log_test.info('JSON request returned status %d' %code)
-            assert_equal(status, True)
-
-    @classmethod
-    def vrouter_config_get(cls, networks = 4, peers = 1, peer_address = None,
-                           route_update = None, router_address = None):
-        vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers,
-                                                    peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-        ##ONOS router does not support dynamic reconfigurations
-        #for config in vrouter_configs:
-        #    cls.onos_load_config(config)
-        #    time.sleep(5)
-
-    @classmethod
-    def vrouter_host_load(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            log_test.info('Assigning ip %s to interface %s' %(host, iface))
-            config_cmds = ( 'ifconfig {} 0'.format(iface),
-                            'ifconfig {0} {1}'.format(iface, host),
-                            'arping -I {0} {1} -c 2'.format(iface, host),
-                            )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def vrouter_host_unload(cls, peer_address = None):
-        index = 1
-        peer_info = peer_address if peer_address is not None else cls.peer_list
-
-        for host,_ in peer_info:
-            iface = cls.port_map[index]
-            index += 1
-            config_cmds = ('ifconfig {} 0'.format(iface), )
-            for cmd in config_cmds:
-                os.system(cmd)
-
-    @classmethod
-    def start_onos(cls, network_cfg = None):
-        if type(network_cfg) is tuple:
-            res = []
-            for v in network_cfg:
-                res += v.items()
-            config = dict(res)
-        else:
-            config = network_cfg
-        log_test.info('Restarting ONOS with new network configuration')
-        return cord_test_onos_restart(config = config)
-
-    @classmethod
-    def start_quagga(cls, networks = 4, peer_address = None, router_address = None):
-        log_test.info('Restarting Quagga container with configuration for %d networks' %(networks))
-        config = cls.generate_conf(networks = networks, peer_address = peer_address, router_address = router_address)
-        if networks <= 10000:
-            boot_delay = 25
-        else:
-            delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
-            n = min(networks/100000, len(delay_map)-1)
-            boot_delay = delay_map[n]
-        cord_test_quagga_restart(config = config, boot_delay = boot_delay)
-
-    @classmethod
-    def generate_vrouter_conf(cls, networks = 4, peers = 1, peer_address = None, router_address = None):
-        num = 0
-        if peer_address is None:
-           start_peer = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
-           end_peer =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
-        else:
-           ip = peer_address[0][0]
-           start_ip = ip.split('.')
-           start_peer = ( int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_peer =   ((int(start_ip[0]) + 8) << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        local_network = end_peer + 1
-        ports_dict = { 'ports' : {} }
-        interface_list = []
-        peer_list = []
-        for n in xrange(start_peer, end_peer, 256):
-            port_map = ports_dict['ports']
-            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
-            device_port_key = '{0}/{1}'.format(cls.device_id, port)
-            try:
-                interfaces = port_map[device_port_key]['interfaces']
-            except:
-                port_map[device_port_key] = { 'interfaces' : [] }
-                interfaces = port_map[device_port_key]['interfaces']
-            ip = n + 2
-            peer_ip = n + 1
-            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
-            peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
-            mac = RandMAC()._fix()
-            peer_list.append((peer, mac))
-            if num < cls.MAX_PORTS - 1:
-                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
-                interfaces.append(interface_dict)
-                interface_list.append(interface_dict['name'])
-            else:
-                interfaces[0]['ips'].append(ips)
-            num += 1
-            if num == peers:
-                break
-        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
-        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
-        quagga_router_dict['ospfEnabled'] = True
-        quagga_router_dict['interfaces'] = interface_list
-        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-
-        #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
-        bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
-        speaker_dict = {}
-        speaker_dict['name'] = 'bgp{}'.format(peers+1)
-        speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
-        speaker_dict['peers'] = peer_list
-        bgp_speakers_list.append(speaker_dict)
-        cls.peer_list = peer_list
-        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
-
-    @classmethod
-    def generate_conf(cls, networks = 4, peer_address = None, router_address = None):
-        num = 0
-        if router_address is None:
-            start_network = ( 11 << 24) | ( 10 << 16) | ( 10 << 8) | 0
-            end_network =   ( 172 << 24 ) | ( 0 << 16)  | (0 << 8) | 0
-            network_mask = 24
-        else:
-           ip = router_address
-           start_ip = ip.split('.')
-           network_mask = int(start_ip[3].split('/')[1])
-           start_ip[3] = (start_ip[3].split('/'))[0]
-           start_network = (int(start_ip[0]) << 24) | ( int(start_ip[1]) << 16)  |  ( int(start_ip[2]) << 8) | 0
-           end_network = (172 << 24 ) | (int(start_ip[1]) << 16)  |  (int(start_ip[2]) << 8) | 0
-        net_list = []
-        peer_list = peer_address if peer_address is not None else cls.peer_list
-        network_list = []
-        for n in xrange(start_network, end_network, 256):
-            net = '%d.%d.%d.0'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
-            network_list.append(net)
-            gateway = peer_list[num % len(peer_list)][0]
-            net_route = 'ip route {0}/{1} {2}'.format(net, network_mask, gateway)
-            net_list.append(net_route)
-            num += 1
-            if num == networks:
-                break
-        cls.network_list = network_list
-        cls.network_mask = network_mask
-        zebra_routes = '\n'.join(net_list)
-        #log_test.info('Zebra routes: \n:%s\n' %cls.zebra_conf + zebra_routes)
-        return cls.zebra_conf + zebra_routes
-
-    @classmethod
-    def vrouter_activate(cls, deactivate = False):
-        app = 'org.onosproject.vrouter'
-        onos_ctrl = OnosCtrl(app)
-        if deactivate is True:
-            onos_ctrl.deactivate()
-        else:
-            onos_ctrl.activate()
-        time.sleep(3)
-
-    @classmethod
-    def vrouter_configure(cls, networks = 4, peers = 1, peer_address = None,
-                          route_update = None, router_address = None, time_expire = None, adding_new_routes = None):
-        vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers,
-                                                 peer_address = peer_address, route_update = route_update)
-        cls.start_onos(network_cfg = vrouter_configs)
-        cls.activate_apps()
-        time.sleep(5)
-        cls.vrouter_host_load()
-        ##Start quagga
-        cls.start_quagga(networks = networks, peer_address = peer_address, router_address = router_address)
-        return vrouter_configs
-
-    def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        self.success = False if positive_test else True
-        timeout = 10 if positive_test else 1
-        count = 2 if positive_test else 1
-        self.start_sending = True
-        def recv_task():
-            def recv_cb(pkt):
-                log_test.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
-                self.success = True if positive_test else False
-            sniff(count=count, timeout=timeout,
-                  lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
-                  prn = recv_cb, iface = self.port_map[ingress])
-            self.start_sending = False
-
-        t = threading.Thread(target = recv_task)
-        t.start()
-        L2 = Ether(src = src_mac, dst = dst_mac)
-        L3 = IP(src = src_ip, dst = dst_ip)
-        pkt = L2/L3
-        log_test.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
-                 (dst_ip, dst_mac, self.port_map[egress]))
-        while self.start_sending is True:
-            sendp(pkt, count=50, iface = self.port_map[egress])
-        t.join()
-        assert_equal(self.success, True)
-
-    def vrouter_traffic_verify(self, positive_test = True, peer_address = None):
-        if peer_address is None:
-            peers = len(self.peer_list)
-            peer_list = self.peer_list
-        else:
-            peers = len(peer_address)
-            peer_list = peer_address
-        egress = peers + 1
-        num = 0
-        num_hosts = 5 if positive_test else 1
-        src_mac = '00:00:00:00:00:02'
-        src_ip = '1.1.1.1'
-        if self.network_mask != 24:
-            peers = 1
-        for network in self.network_list:
-            num_ips = num_hosts
-            octets = network.split('.')
-            for i in xrange(num_ips):
-                octets[-1] = str(int(octets[-1]) + 1)
-                dst_ip = '.'.join(octets)
-                dst_mac = peer_list[ num % peers ] [1]
-                port = (num % peers)
-                ingress = port + 1
-                #Since peers are on the same network
-                ##Verify if flows are setup by sending traffic across
-                self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
-            num += 1
-
-    def __vrouter_network_verify(self, networks, peers = 1, positive_test = True,
-                                 start_network = None, start_peer_address = None, route_update = None,
-                                 invalid_peers = None, time_expire = None, unreachable_route_traffic = None,
-                                 deactivate_activate_vrouter = None, adding_new_routes = None):
-
-        _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers,
-                                                          peer_address = start_peer_address,
-                                                          route_update = route_update,
-                                                          router_address = start_network,
-                                                          time_expire = time_expire,
-                                                          adding_new_routes = adding_new_routes)
-        self.cliEnter()
-        ##Now verify
-        hosts = json.loads(self.cli.hosts(jsonFormat = True))
-        log_test.info('Discovered hosts: %s' %hosts)
-        ##We read from cli if we expect less number of routes to avoid cli timeouts
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            #log_test.info('Routes: %s' %routes)
-            if start_network is not None:
-               if start_network.split('/')[1] is 24:
-                  assert_equal(len(routes['routes4']), networks)
-               if start_network.split('/')[1] is not 24:
-                  assert_equal(len(routes['routes4']), 1)
-            if start_network is None and invalid_peers is None:
-               assert_equal(len(routes['routes4']), networks)
-            if invalid_peers is not None:
-               assert_equal(len(routes['routes4']), 0)
-            flows = json.loads(self.cli.flows(jsonFormat = True))
-            flows = filter(lambda f: f['flows'], flows)
-            #log_test.info('Flows: %s' %flows)
-            assert_not_equal(len(flows), 0)
-        if invalid_peers is None:
-            self.vrouter_traffic_verify()
-        if positive_test is False:
-            self.__vrouter_network_verify_negative(networks, peers = peers)
-        if time_expire is True:
-            self.start_quagga(networks = networks, peer_address = start_peer_address, router_address = '12.10.10.1/24')
-            self.vrouter_traffic_verify()
-        if unreachable_route_traffic is True:
-            network_list_backup = self.network_list
-            self.network_list = ['2.2.2.2','3.3.3.3','4.4.4.4','5.5.5.5']
-            self.vrouter_traffic_verify(positive_test = False)
-            self.network_list = network_list_backup
-        if deactivate_activate_vrouter is True:
-            log_test.info('Deactivating vrouter app in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = True)
-            #routes = json.loads(self.cli.routes(jsonFormat = False, cmd_exist = False))
-            #assert_equal(len(routes['routes4']), 'Command not found')
-            log_test.info('Activating vrouter app again in ONOS controller for negative scenario')
-            self.vrouter_activate(deactivate = False)
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            assert_equal(len(routes['routes4']), networks)
-            self.vrouter_traffic_verify()
-        self.cliExit()
-        self.vrouter_host_unload()
-        return True
-
-    def __vrouter_network_verify_negative(self, networks, peers = 1):
-        ##Stop quagga. Test traffic again to see if flows were removed
-        log_test.info('Stopping Quagga container')
-        cord_test_quagga_stop()
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            #Verify routes have been removed
-            if routes and routes.has_key('routes4'):
-                assert_equal(len(routes['routes4']), 0)
-        self.vrouter_traffic_verify(positive_test = False)
-        log_test.info('OVS flows have been removed successfully after Quagga was stopped')
-        self.start_quagga(networks = networks)
-        ##Verify the flows again after restarting quagga back
-        if networks <= 10000:
-            routes = json.loads(self.cli.routes(jsonFormat = True))
-            assert_equal(len(routes['routes4']), networks)
-        self.vrouter_traffic_verify()
-        log_test.info('OVS flows have been successfully reinstalled after Quagga was restarted')
-
-    def quagga_shell(self, cmd):
-        shell_cmds = ('vtysh', '"conf t"', '"{}"'.format(cmd))
-        quagga_cmd = ' -c '.join(shell_cmds)
-        return cord_test_quagga_shell(quagga_cmd)
-
-    def test_vrouter_with_5_routes(self):
-        res = self.__vrouter_network_verify(5, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_5_routes_2_peers(self):
-        res = self.__vrouter_network_verify(5, peers = 2)
-        assert_equal(res, True)
-
-    def test_vrouter_with_6_routes_3_peers(self):
-        res = self.__vrouter_network_verify(6, peers = 3)
-        assert_equal(res, True)
-
-    def test_vrouter_with_50_routes(self):
-        res = self.__vrouter_network_verify(50, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_50_routes_5_peers(self):
-        res = self.__vrouter_network_verify(50, peers = 5)
-        assert_equal(res, True)
-
-    def test_vrouter_with_100_routes(self):
-        res = self.__vrouter_network_verify(100, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_100_routes_10_peers(self):
-        res = self.__vrouter_network_verify(100, peers = 10)
-        assert_equal(res, True)
-
-    def test_vrouter_with_300_routes(self):
-        res = self.__vrouter_network_verify(300, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_1000_routes(self):
-        res = self.__vrouter_network_verify(1000, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_10000_routes(self):
-        res = self.__vrouter_network_verify(10000, peers = 1)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_100000_routes(self):
-        res = self.__vrouter_network_verify(100000, peers = 1)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_1000000_routes(self):
-        res = self.__vrouter_network_verify(1000000, peers = 1)
-        assert_equal(res, True)
-
-    def test_vrouter_with_5_routes_stopping_quagga(self):
-        res = self.__vrouter_network_verify(5, peers = 1, positive_test = False)
-
-    def test_vrouter_with_50_routes_stopping_quagga(self):
-        res = self.__vrouter_network_verify(50, peers = 1, positive_test = False)
-
-    def test_vrouter_with_route_update(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        assert_equal(res, True)
-        peer_info = [('192.168.20.1', '00:00:00:00:01:01'), ('192.168.21.1', '00:00:00:00:02:01')]
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, route_update = True)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classA_route_update(self):
-        router_address = '11.10.10.0/8'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classB_route_update(self):
-        router_address = '11.10.10.0/16'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classless_route_update(self):
-        router_address = '11.10.10.0/12'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classA_duplicate_route_update(self):
-        router_address = '11.10.10.0/8'
-        res = self.__vrouter_network_verify(5, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classB_duplicate_route_update(self):
-        router_address = '11.10.10.0/16'
-        res = self.__vrouter_network_verify(5, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_classless_duplicate_route_update(self):
-        router_address = '11.10.10.0/12'
-        res = self.__vrouter_network_verify(5, peers = 1, positive_test = True, start_network = router_address)
-        assert_equal(res, True)
-
-    def test_vrouter_with_invalid_peers(self):
-        peer_info = [('239.255.255.250', '00:00:00:00:01:01'), ('239.255.255.240', '00:00:00:00:02:01')]
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, invalid_peers= True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_traffic_sent_between_peers_connected_to_onos(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, traffic_running_between_peers = True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_routes_time_expire(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, time_expire = True)
-        assert_equal(res, True)
-
-    def test_vrouter_with_unreachable_route(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, unreachable_route_traffic = True)
-        assert_equal(res, True)
-
-    @nottest
-    def test_vrouter_with_enabling_disabling_vrouter_app(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True, deactivate_activate_vrouter = True)
-        assert_equal(res, True)
-
-    def test_vrouter_with_adding_new_routes_in_routing_table(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        cmd = 'ip route 21.10.20.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify()
-        self.network_list = [ '21.10.20.0' ]
-        self.network_mask = 24
-        self.vrouter_traffic_verify()
-        assert_equal(res, True)
-
-    def test_vrouter_with_removing_old_routes_in_routing_table(self):
-        res = self.__vrouter_network_verify(5, peers = 2, positive_test = True)
-        cmd = 'ip route 21.10.20.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify()
-        old_network_list = self.network_list
-        old_network_mask = self.network_mask
-        self.network_list = [ '21.10.20.0' ]
-        self.network_mask = 24
-        self.vrouter_traffic_verify()
-        assert_equal(res, True)
-        cmd = 'no ip route 21.10.20.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        time.sleep(5)
-        self.vrouter_traffic_verify(positive_test = False)
-        self.network_mask = old_network_mask
-        self.network_list = old_network_list
-        self.vrouter_traffic_verify(positive_test = True)
-
-    def test_vrouter_modifying_nexthop_route_in_routing_table(self):
-        peer_info = [('192.168.10.1', '00:00:00:00:01:01'), ('192.168.11.1', '00:00:00:00:02:01')]
-        router_address = '11.10.10.0/24'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'ip route 11.10.10.0/24 192.168.20.1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-
-    def test_vrouter_deleting_alternative_nexthop_in_routing_table(self):
-        peer_info = [('192.168.10.1', '00:00:00:00:01:01'), ('192.168.11.1', '00:00:00:00:02:01')]
-        router_address = '11.10.10.0/24'
-        res = self.__vrouter_network_verify(1, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ip route 11.10.10.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        time.sleep(5)
-        self.vrouter_traffic_verify(positive_test = False)
-        assert_equal(res, True)
-
-    def test_vrouter_deleting_some_routes_in_routing_table(self):
-        peer_info = [('192.168.10.1', '00:00:00:00:01:01'), ('192.168.11.1', '00:00:00:00:02:01')]
-        router_address = '11.10.10.0/24'
-        res = self.__vrouter_network_verify(10, peers = 2, positive_test = True,
-                                            start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ip route 11.10.10.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        cmd = 'no ip route 11.10.13.0/24 192.168.11.1'
-        self.quagga_shell(cmd)
-        cmd = 'no ip route 11.10.14.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-
-    def test_vrouter_deleting_and_adding_routes_in_routing_table(self):
-        peer_info = [('192.168.10.1', '00:00:00:00:01:01'), ('192.168.11.1', '00:00:00:00:02:01')]
-        router_address = '11.10.10.0/24'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_peer_address = peer_info, start_network  = router_address)
-        cmd = 'no ip route 11.10.10.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        cmd = 'ip route 11.10.10.0/24 192.168.10.1'
-        self.quagga_shell(cmd)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-    def test_vrouter_toggling_nexthop_interface(self):
-        peer_info = [('192.168.10.1', '00:00:00:00:01:01'), ('192.168.11.1', '00:00:00:00:02:01')]
-        router_address = '11.10.10.0/24'
-        res = self.__vrouter_network_verify(1, peers = 1, positive_test = True, start_peer_address = peer_info, start_network  = router_address)
-        iface = self.port_map[1]
-        #toggle the interface to trigger host removal.
-        cmds = ('ifconfig {} down'.format(iface),
-                'sleep 2',
-                'ifconfig {} 0'.format(iface),)
-        for cmd in cmds:
-            os.system(cmd)
-        self.vrouter_traffic_verify(positive_test = False)
-        host = "192.168.10.1"
-        cmd = 'ifconfig {0} {1} up'.format(iface, host)
-        os.system(cmd)
-        #wait for arp refresh
-        time.sleep(60)
-        self.vrouter_traffic_verify(positive_test = True)
-        assert_equal(res, True)
-
-    def vrouter_scale(self, num_routes, peers = 1):
-        '''Called from scale test'''
-        return self.__vrouter_network_verify(num_routes, peers = peers)
diff --git a/src/test/vsg/__init__.py b/src/test/vsg/__init__.py
deleted file mode 100644
index d370d7c..0000000
--- a/src/test/vsg/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(subscriber_dir)
-cli_dir = os.path.join(working_dir, '../cli')
-__path__.append(cli_dir)
-
-#from nose import main as nosetest_main
-#from CordTestConfig import CordTestConfigRestore
-#nosetest_main(addplugins = [ CordTestConfigRestore() ])
diff --git a/src/test/vsg/vsgTest.json b/src/test/vsg/vsgTest.json
deleted file mode 100644
index b8dd1b6..0000000
--- a/src/test/vsg/vsgTest.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "NUM_SUBSCRIBERS" : 5,
-    "SUBSCRIBER_ACCOUNT_NUM" : 200,
-    "SUBSCRIBER_S_TAG" : 304,
-    "SUBSCRIBER_C_TAG" : 304,
-    "SUBSCRIBERS_PER_S_TAG" : 8
-}
diff --git a/src/test/vsg/vsgTest.py b/src/test/vsg/vsgTest.py
deleted file mode 100644
index 9741b2a..0000000
--- a/src/test/vsg/vsgTest.py
+++ /dev/null
@@ -1,3160 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import time
-import os
-import sys
-import json
-import requests
-from nose.tools import *
-from twisted.internet import defer
-from nose.twistedtools import reactor, deferred
-from CordTestUtils import *
-from OltConfig import OltConfig
-from onosclidriver import OnosCliDriver
-from SSHTestAgent import SSHTestAgent
-from CordLogger import CordLogger
-from VSGAccess import VSGAccess
-from CordTestUtils import log_test as log
-from CordTestConfig import setup_module, running_on_ciab, teardown_module
-from OnosCtrl import OnosCtrl
-from CordContainer import Onos
-from CordSubscriberUtils import CordSubscriberUtils, XosUtils
-log.setLevel('INFO')
-
-class vsg_exchange(CordLogger):
-    ONOS_INSTANCES = 3
-    V_INF1 = 'veth0'
-    device_id = 'of:' + get_mac()
-    TEST_IP = '8.8.8.8'
-    HOST = "10.1.0.1"
-    USER = "vagrant"
-    PASS = "vagrant"
-    head_node = os.getenv('HEAD_NODE', 'head1')
-    HEAD_NODE = head_node + '.cord.lab' if len(head_node.split('.')) == 1 else head_node
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config.json'))
-    restApiXos =  None
-    cord_subscriber = None
-    SUBSCRIBER_ACCOUNT_NUM = 200
-    SUBSCRIBER_S_TAG = 304
-    SUBSCRIBER_C_TAG = 304
-    SUBSCRIBERS_PER_S_TAG = 8
-    subscriber_info = []
-    volt_subscriber_info = []
-    restore_methods = []
-    TIMEOUT=120
-    FABRIC_PORT_HEAD_NODE = 1
-    FABRIC_PORT_COMPUTE_NODE = 2
-    APP_NAME = 'org.ciena.xconnect'
-    APP_FILE = os.path.join(test_path, '..', 'apps/xconnect-1.0-SNAPSHOT.oar')
-    NUM_SUBSCRIBERS = 5
-
-    @classmethod
-    def setUpCordApi(cls, **subscriber_config):
-        num_subscribers = subscriber_config.get('num_subscribers', cls.NUM_SUBSCRIBERS)
-        account_num = subscriber_config.get('account_num', cls.SUBSCRIBER_ACCOUNT_NUM)
-        s_tag = subscriber_config.get('s_tag', cls.SUBSCRIBER_S_TAG)
-        c_tag = subscriber_config.get('c_tag', cls.SUBSCRIBER_C_TAG)
-        subscribers_per_s_tag = subscriber_config.get('subscribers_per_s_tag', cls.SUBSCRIBERS_PER_S_TAG)
-        cls.cord_subscriber = CordSubscriberUtils(num_subscribers,
-                                                  account_num = account_num,
-                                                  s_tag = s_tag,
-                                                  c_tag = c_tag,
-                                                  subscribers_per_s_tag = subscribers_per_s_tag)
-        cls.restApiXos = XosUtils.getRestApi()
-
-    @classmethod
-    def closeVCPEAccess(cls, volt_subscriber_info):
-        OnosCtrl.uninstall_app(cls.APP_NAME, onos_ip = cls.HEAD_NODE)
-
-    @classmethod
-    def openVCPEAccess(cls, volt_subscriber_info):
-        """
-        This code is used to configure leaf switch for head node access to compute node over fabric.
-        Care is to be taken to avoid overwriting existing/default vcpe flows.
-        The access is opened for generated subscriber info which should not overlap.
-        We target the fabric onos instance on head node.
-        """
-        version = Onos.getVersion(onos_ip = cls.HEAD_NODE)
-        app_version = '1.0-SNAPSHOT'
-        major = int(version.split('.')[0])
-        minor = int(version.split('.')[1])
-        if major > 1:
-            app_version = '2.0-SNAPSHOT'
-        elif major == 1 and minor >= 10:
-            app_version = '2.0-SNAPSHOT'
-        cls.APP_FILE = os.path.join(cls.test_path, '..', 'apps/xconnect-{}.oar'.format(app_version))
-        OnosCtrl.install_app(cls.APP_FILE, onos_ip = cls.HEAD_NODE)
-        time.sleep(2)
-        s_tags = map(lambda tenant: int(tenant['voltTenant']['s_tag']), volt_subscriber_info)
-        #only get unique vlan tags
-        s_tags = list(set(s_tags))
-        devices = OnosCtrl.get_device_ids(controller = cls.HEAD_NODE)
-        if devices:
-            device_config = {}
-            for device in devices:
-                device_config[device] = []
-                for s_tag in s_tags:
-                    xconnect_config = {'vlan': s_tag, 'ports' : [ cls.FABRIC_PORT_HEAD_NODE, cls.FABRIC_PORT_COMPUTE_NODE ] }
-                    device_config[device].append(xconnect_config)
-
-            cfg = { 'apps' : { 'org.ciena.xconnect' : { 'xconnectTestConfig' : device_config } } }
-            OnosCtrl.config(cfg, controller = cls.HEAD_NODE)
-
-    @classmethod
-    def vsgSetup(cls, **subscriber_config):
-        cls.controllers = get_controllers()
-        cls.controller = cls.controllers[0]
-        cls.cli = None
-        cls.on_pod = running_on_pod()
-        cls.on_ciab = running_on_ciab()
-        cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
-        cls.vcpes = cls.olt.get_vcpes()
-        cls.vcpes_dhcp = cls.olt.get_vcpes_by_type('dhcp')
-        cls.vcpes_reserved = cls.olt.get_vcpes_by_type('reserved')
-        cls.dhcp_vcpes_reserved = [ 'vcpe{}.{}.{}'.format(i, cls.vcpes_reserved[i]['s_tag'], cls.vcpes_reserved[i]['c_tag'])
-                                    for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.untagged_dhcp_vcpes_reserved = [ 'vcpe{}'.format(i) for i in xrange(len(cls.vcpes_reserved)) ]
-        cls.container_vcpes_reserved = [ 'vsg-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_reserved ]
-        vcpe_dhcp_reserved = None
-        vcpe_container_reserved = None
-        if cls.vcpes_reserved:
-            vcpe_dhcp_reserved = cls.dhcp_vcpes_reserved[0]
-            if cls.on_pod is False:
-                vcpe_dhcp_reserved = cls.untagged_dhcp_vcpes_reserved[0]
-            vcpe_container_reserved = cls.container_vcpes_reserved[0]
-
-        cls.vcpe_dhcp_reserved = vcpe_dhcp_reserved
-        cls.vcpe_container_reserved = vcpe_container_reserved
-        dhcp_vcpe_offset = len(cls.vcpes_reserved)
-        cls.dhcp_vcpes = [ 'vcpe{}.{}.{}'.format(i+dhcp_vcpe_offset, cls.vcpes_dhcp[i]['s_tag'], cls.vcpes_dhcp[i]['c_tag'])
-                           for i in xrange(len(cls.vcpes_dhcp))  ]
-        cls.untagged_dhcp_vcpes = [ 'vcpe{}'.format(i+dhcp_vcpe_offset) for i in xrange(len(cls.vcpes_dhcp)) ]
-        cls.container_vcpes = [ 'vsg-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag']) for vcpe in cls.vcpes_dhcp ]
-        vcpe_dhcp = None
-        vcpe_container = None
-        #cache the first dhcp vcpe in the class for quick testing
-        if cls.vcpes_dhcp:
-            vcpe_container = cls.container_vcpes[0]
-            vcpe_dhcp = cls.dhcp_vcpes[0]
-            if cls.on_pod is False:
-                vcpe_dhcp = cls.untagged_dhcp_vcpes[0]
-        cls.vcpe_container = vcpe_container_reserved or vcpe_container
-        cls.vcpe_dhcp = vcpe_dhcp_reserved or vcpe_dhcp
-        VSGAccess.setUp()
-        cls.setUpCordApi(**subscriber_config)
-        if cls.on_pod is True:
-            cls.openVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
-
-    @classmethod
-    def setUpClass(cls):
-        num_subscribers = max(cls.NUM_SUBSCRIBERS, 5)
-        cls.vsgSetup(num_subscribers = num_subscribers)
-
-    @classmethod
-    def vsgTeardown(cls):
-        VSGAccess.tearDown()
-        if cls.on_pod is True:
-            cls.closeVCPEAccess(cls.cord_subscriber.volt_subscriber_info)
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.config_restore()
-        cls.vsgTeardown()
-
-    def tearDown(self):
-        self.config_restore()
-        super(vsg_exchange, self).tearDown()
-
-    def onos_shutdown(self, controller = None):
-        status = True
-        cli = Onos.cliEnter(onos_ip = controller)
-        try:
-            cli.shutdown(timeout = 10)
-        except:
-            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
-            status = False
-
-        Onos.cliExit(cli)
-        return status
-
-    def log_set(self, level = None, app = 'org.onosproject'):
-        CordLogger.logSet(level = level, app = app, controllers = self.controllers, forced = True)
-
-    @classmethod
-    def get_dhcp(cls, vcpe, mgmt = 'eth0'):
-        """Get DHCP for vcpe interface saving management settings"""
-
-        def put_dhcp():
-            VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-
-        vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        if vcpe_ip is not None:
-            cls.restore_methods.append(put_dhcp)
-        return vcpe_ip
-
-    @classmethod
-    def config_restore(cls):
-        """Restore the vsg test configuration on test case failures"""
-        while cls.restore_methods:
-            restore_method = cls.restore_methods.pop()
-            restore_method()
-
-    def get_vsg_vcpe_pair(self):
-        vcpes = self.vcpes_dhcp
-        vcpe_containers = []
-        vsg_vcpe = {}
-        for vcp in vcpes:
-                vcpe_container = 'vsg-{}-{}'.format(vcp['s_tag'], vcp['c_tag'])
-                vcpe_containers.append(vcpe_container)
-                vsg = VSGAccess.get_vcpe_vsg(vcpe_container)
-                vsg_vcpe[vcpe_container]=str(vsg.get_ip())
-        return vsg_vcpe
-
-    def get_vcpe_containers_and_interfaces(self):
-	vcpe_containers = {}
-	vcpe_interfaces = []
-	vcpes = self.vcpes_dhcp
-	count = 0
-	for vcpe in vcpes:
-		vcpe_intf = 'vcpe{}.{}.{}'.format(count,vcpe['s_tag'],vcpe['c_tag'])
-		vcpe_interfaces.append(vcpe_intf)
-                vcpe_container = 'vsg-{}-{}'.format(vcpe['s_tag'], vcpe['c_tag'])
-                vcpe_containers[vcpe_intf] = vcpe_container
-		count += 1
-	log.info('vcpe interfaces are %s'%vcpe_interfaces)
-	log.info('vcpe containers are %s'%vcpe_containers)
-	return vcpe_interfaces,vcpe_containers
-
-    def get_vcpe_interface_dhcp_ip(self,vcpe=None):
-        if not vcpe:
-            vcpe = self.dhcp_vcpes_reserved[0]
-        st, _ = getstatusoutput('dhclient {}'.format(vcpe))
-	vcpe_ip = get_ip(vcpe)
-	return vcpe_ip
-
-    def release_vcpe_interface_dhcp_ip(self,vcpe=None):
-        if not vcpe:
-            vcpe = self.dhcp_vcpes_reserved[0]
-        st, _ = getstatusoutput('dhclient {} -r'.format(vcpe))
-        vcpe_ip = get_ip(vcpe)
-        assert_equal(vcpe_ip, None)
-
-    def add_static_route_via_vcpe_interface(self, routes, vcpe=None,dhcp_ip=True):
-	if not vcpe:
-	    vcpe = self.dhcp_vcpes_reserved[0]
-	if dhcp_ip:
-	    os.system('dhclient '+vcpe)
-	time.sleep(1)
-	for route in routes:
-	    log.info('route is %s'%route)
-	    cmd = 'ip route add ' + route + ' via 192.168.0.1 '+ 'dev ' + vcpe
-	    os.system(cmd)
-	return True
-
-    def del_static_route_via_vcpe_interface(self,routes,vcpe=None,dhcp_release=True):
-        if not vcpe:
-            vcpe = self.dhcp_vcpes_reserved[0]
-        cmds = []
-        for route in routes:
-            cmd = 'ip route del ' + route + ' via 192.168.0.1 ' + 'dev ' + vcpe
-	    os.system(cmd)
-        if dhcp_release:
-            os.system('dhclient '+vcpe+' -r')
-	return True
-
-    def vsg_for_external_connectivity(self, subscriber_index, reserved = False):
-        if reserved is True:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes_reserved[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes_reserved[subscriber_index]
-        else:
-            if self.on_pod is True:
-                vcpe = self.dhcp_vcpes[subscriber_index]
-            else:
-                vcpe = self.untagged_dhcp_vcpes[subscriber_index]
-        mgmt = 'eth0'
-        host = '8.8.8.8'
-        self.success = False
-        assert_not_equal(vcpe, None)
-        vcpe_ip = self.get_dhcp(vcpe, mgmt = mgmt)
-        assert_not_equal(vcpe_ip, None)
-        log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        log.info('Sending icmp echo requests to external network 8.8.8.8')
-        st, _ = getstatusoutput('ping -c 3 8.8.8.8')
-        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        self.restore_methods.pop()
-        assert_equal(st, 0)
-
-    def get_vsg_health_check(self, vsg_name=None):
-        if self.on_pod is False:
-            return
-        if vsg_name is None:
-            vcpe = self.container_vcpes_reserved[0]
-            vsg = VSGAccess.get_vcpe_vsg(vcpe)
-            status = vsg.get_health()
-            return status
-        else:
-            vsgs = VSGAccess.get_vsgs()
-            for vsg in vsgs:
-                if vsg.name == vsg_name:
-                    status = vsg.get_health()
-                    return status
-            return None
-
-    def test_vsg_health(self):
-        """
-        Test Method:
-        1. Login to compute node VM
-        2. Get all vSGs
-        3. Ping to all vSGs
-        4. Verifying Ping success
-        """
-        status = True
-        if self.on_pod is True:
-            status = VSGAccess.health_check()
-        assert_equal(status, True)
-
-    def test_vsg_health_check(self, vsg_name=None, verify_status=True):
-        """
-        Test Method:
-	1. If vsg name not specified, Get vsg corresponding to vcpe
-        1. Login to compute mode VM
-        3. Ping to the vSG
-        4. Verifying Ping success
-        """
-	st = self.get_vsg_health_check(vsg_name=vsg_name)
-	assert_equal(st,verify_status)
-
-    @deferred(30)
-    def test_vsg_for_vcpe(self):
-        """
-        Test Method:
-	1. Get list of all compute nodes created using Openstack
-        2. Login to compute mode VM
-        3. Get all vSGs
-        4. Verifying atleast one compute node and one vSG created
-        """
-        df = defer.Deferred()
-        def vsg_for_vcpe_df(df):
-            if self.on_pod is True:
-                vsgs = VSGAccess.get_vsgs()
-                compute_nodes = VSGAccess.get_compute_nodes()
-                time.sleep(14)
-                assert_not_equal(len(vsgs), 0)
-                assert_not_equal(len(compute_nodes), 0)
-            df.callback(0)
-        reactor.callLater(0,vsg_for_vcpe_df,df)
-        return df
-
-    def test_vsg_for_login(self):
-        """
-        Test Method:
-        1. Login to compute node VM
-        2. Get all vSGs
-        3. Verifying login to vSG is success
-        """
-        if self.on_pod is False:
-            return
-        vsgs = VSGAccess.get_vsgs()
-        vsg_access_status = map(lambda vsg: vsg.check_access(), vsgs)
-        status = filter(lambda st: st == False, vsg_access_status)
-        assert_equal(len(status), 0)
-
-    def test_vsg_for_default_route_through_testclient(self):
-	"""
-	Test Method:
-	1. Login to head node
-	2. Verifying for default route in lxc test client
-	"""
-        if self.on_pod is False:
-            return
-        ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-        cmd = "sudo lxc exec testclient -- route | grep default"
-        status, output = ssh_agent.run_cmd(cmd)
-        assert_equal(status, True)
-
-    @deferred(30)
-    def test_vsg_for_external_connectivity_through_testclient(self):
-        """
-        Test Method:
-        1. Login to head node
-        2. On head node, executing ping to 8.8.8.8 from lxc test client
-	3. Verifying for the ping success
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            ssh_agent = SSHTestAgent(host = self.HEAD_NODE, user = self.USER, password = self.PASS)
-            cmd = "sudo lxc exec testclient -- ping -c 3 8.8.8.8"
-            status, output = ssh_agent.run_cmd(cmd)
-            assert_equal( status, True)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_for_external_connectivity(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to 8.8.8.8 and Verifying ping should success
-	4. Restoring management interface configuration in  cord-tester
-        """
-        reserved = True
-        if self.on_pod:
-            reserved = self.on_ciab
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            self.vsg_for_external_connectivity(0, reserved = reserved)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_for_external_connectivity_to_google(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to www.google.com and Verifying ping should success
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            host = 'www.google.com'
-            vcpe = self.dhcp_vcpes_reserved[0]
-            mgmt = 'eth0'
-            assert_not_equal(vcpe, None)
-	    try:
-            	vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-                assert_not_equal(vcpe_ip, None)
-                log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-                log.info('Sending icmp ping requests to %s' %host)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-		assert_equal(st, 0)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-                VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    def retrieve_content_from_host_to_validate_path_mtu(self, host):
-        vcpe = self.dhcp_vcpes_reserved[0]
-        mgmt = 'eth0'
-        assert_not_equal(vcpe, None)
-        vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        assert_not_equal(vcpe_ip, None)
-        log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        log.info('Initiating get requests to %s' %host)
-        r = requests.get('http://{}'.format(host))
-        VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        return r.status_code
-
-    @deferred(30)
-    def test_vsg_to_retrieve_content_from_google_to_validate_path_mtu(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Retrieve contents from www.google.com and Verify response status is 200 ok.
-        4. This validates path mtu for end to end traffic with request to retrieve web contents in cord framework.
-           (Based on website response, size differs, needs check on MTU)
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            status_code = self.retrieve_content_from_host_to_validate_path_mtu('www.google.com')
-            assert_equal(status_code, 200)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_to_retrieve_content_from_rediff_to_validate_path_mtu(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Retrieve contents from www.rediff.com and Verify response status is 200 ok.
-        4. This validates path mtu for end to end traffic with request to retrieve web contents in cord framework.
-           (Based on website response, size differs, needs check on MTU)
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            status_code = self.retrieve_content_from_host_to_validate_path_mtu('www.rediff.com')
-            assert_equal(status_code, 200)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_to_retrieve_content_from_yahoo_to_validate_path_mtu(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Retrieve contents from www.yahoo.com and Verify response status is 200 ok.
-        4. This validates path mtu for end to end traffic with request to retrieve web contents in cord framework.
-           (Based on website response, size differs, needs check on MTU)
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            status_code = self.retrieve_content_from_host_to_validate_path_mtu('www.yahoo.com')
-            assert_equal(status_code, 200)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_to_retrieve_content_from_facebook_to_validate_path_mtu(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Retrieve contents from www.facebook.com and Verify response status is 200 ok.
-        4. This validates path mtu for end to end traffic with request to retrieve web contents in cord framework.
-           (Based on website response, size differs, needs check on MTU)
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            status_code = self.retrieve_content_from_host_to_validate_path_mtu('www.facebook.com')
-            assert_equal(status_code, 200)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-
-    @deferred(30)
-    def test_vsg_for_external_connectivity_to_invalid_host(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to www.goglee.com and Verifying ping should not success
-        4. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            host = 'www.goglee.com'
-            vcpe = self.dhcp_vcpes_reserved[0]
-            mgmt = 'eth0'
-            assert_not_equal(vcpe, None)
-	    try:
-            	vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-            	assert_not_equal(vcpe_ip, None)
-            	log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-            	log.info('Sending icmp ping requests to non existent host %s' %host)
-            	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-	    	assert_not_equal(st, 0)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_for_external_connectivity_with_ttl_1(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to 8.8.8.8 with ttl set to 1
-	4. Verifying ping should not success
-        5. Restoring management interface configuration in  cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            host = '8.8.8.8'
-            vcpe = self.dhcp_vcpes_reserved[0]
-            mgmt = 'eth0'
-            assert_not_equal(vcpe, None)
-	    try:
-            	vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        	assert_not_equal(vcpe_ip, None)
-        	log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        	log.info('Sending icmp ping requests to host %s with ttl 1' %host)
-        	st, _ = getstatusoutput('ping -c 1 -t 1 {}'.format(host))
-         	assert_not_equal(st, 0)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(60)
-    def test_vsg_for_external_connectivity_with_wan_interface_toggle_in_vcpe(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to 8.8.8.8 and Verifying ping succeeds
-	4. Now down the WAN interface of vcpe
-	5. Ping to 8.8.8.8 and Verifying ping fails
-	6. Now Up the WAN interface of vcpe
-	7. Ping to 8.8.8.8 and Verifying ping succeeds
-	8. Restoring management interface configuration in cord-tester
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            if self.on_pod is False:
-                return
-            host = '8.8.8.8'
-            mgmt = 'eth0'
-	    vcpe = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved[0]
-            assert_not_equal(vcpe_name, None)
-            assert_not_equal(vcpe, None)
-            #first get dhcp on the vcpe interface
-	    try:
-            	vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-        	assert_not_equal(vcpe_ip, None)
-        	log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        	log.info('Sending ICMP pings to host %s' %(host))
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-		if st != 0:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, 0)
-        	#bring down the wan interface and check again
-        	st = VSGAccess.vcpe_wan_down(vcpe_name)
-        	if st is False:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, True)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	if st == 0:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_not_equal(st, 0)
-        	st = VSGAccess.vcpe_wan_up(vcpe_name)
-        	if st is False:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, True)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-          	assert_equal(st, 0)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(60)
-    def test_vsg_for_external_connectivity_with_lan_interface_toggle_in_vcpe(self):
-        """
-        Test Method:
-        1. Get dhcp IP to vcpe interface in cord-tester
-        2. Verifying vcpe interface gets dhcp IP
-        3. Ping to 8.8.8.8 and Verifying ping should success
-        4. Now down the LAN interface of vcpe
-        5. Ping to 8.8.8.8 and Verifying ping should not success
-        6. Now Up the LAN interface of vcpe
-        7. Ping to 8.8.8.8 and Verifying ping should success
-        8. Restoring management interface configuration in  cord-tester
-        """
-        if self.on_pod is False:
-            return
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            host = '8.8.8.8'
-            mgmt = 'eth0'
-            vcpe = self.dhcp_vcpes_reserved[0]
-            vcpe_name = self.container_vcpes_reserved[0]
-            assert_not_equal(vcpe, None)
-            assert_not_equal(vcpe_name, None)
-            #first get dhcp on the vcpe interface
-	    try:
-            	vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)
-       	 	assert_not_equal(vcpe_ip, None)
-        	log.info('Got DHCP IP %s for %s' %(vcpe_ip, vcpe))
-        	log.info('Sending ICMP pings to host %s' %(host))
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	if st != 0:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, 0)
-        	#bring down the lan interface and check again
-        	st = VSGAccess.vcpe_lan_down(vcpe_name)
-        	if st is False:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, True)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	if st == 0:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_not_equal(st, 0)
-        	st = VSGAccess.vcpe_lan_up(vcpe_name)
-        	if st is False:
-            		VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-        	assert_equal(st, True)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, 0)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(120)
-    def test_vsg_multiple_subscribers_for_same_vcpe_instance(self):
-	"""
-	Test Method:
-	1. Create a vcpe instance
-	2. Create multiple vcpe interfaces in cord-tester with same s-tag and c-tag to access vcpe instance
-	3. Verify all the interfaces gets dhcp IP in same subnet
-	"""
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            vcpe_intfs,containers = self.get_vcpe_containers_and_interfaces()
-            for vcpe in vcpe_intfs:
-                vcpe_ip = self.get_vcpe_interface_dhcp_ip(vcpe=vcpe)
-                assert_not_equal(vcpe_ip,None)
-            for vcpe in vcpe_intfs:
-                self.release_vcpe_interface_dhcp_ip(vcpe=vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(120)
-    def test_vsg_for_multiple_subscribers_with_same_vcpe_instance_and_validate_external_connectivity(self):
-        """
-        Test Method:
-        1. Create a vcpe instance
-        2. Create multiple vcpe interfaces in cord-tester with same s-tag and c-tag to access vcpe instance
-        3. Verify all the interfaces gets dhcp IP in same subnet
-	4. From cord-tester ping to external  with vcpe interface option
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            host = '8.8.8.8'
-            vcpe_intfs, containers = self.get_vcpe_containers_and_interfaces()
-	    try:
-                for vcpe in vcpe_intfs:
-                    vcpe_ip = self.get_vcpe_interface_dhcp_ip(vcpe=vcpe)
-                    assert_not_equal(vcpe_ip,None)
-                    self.add_static_route_via_vcpe_interface([host],vcpe=vcpe,dhcp_ip=False)
-                    st, _ = getstatusoutput('ping -I {} -c 3 {}'.format(vcpe,host))
-                    assert_equal(st, 0)
-                    self.del_static_route_via_vcpe_interface([host],vcpe=vcpe,dhcp_release=False)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-        	for vcpe in vcpe_intfs:
-            	    self.release_vcpe_interface_dhcp_ip(vcpe=vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(30)
-    def test_vsg_vcpe_interface_and_validate_dhcp_ip_after_interface_toggle(self):
-        """
-        Test Method:
-        1. Create a vcpe instance
-        2. Create a vcpe interface in cord-tester
-        3. Verify the interface gets dhcp IP
-	4. Toggle the interface
-	5. Verify the interface gets dhcp IP
-        """
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-	    vcpe_intf = self.dhcp_vcpes_reserved[0]
-	    host = '8.8.8.8'
-            try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, 0)
-                os.system('ifconfig {} down'.format(vcpe_intf))
-                time.sleep(1)
-                os.system('ifconfig {} up'.format(vcpe_intf))
-		time.sleep(1)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, 0)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-		self.del_static_route_via_vcpe_interface([host], vcpe=vcpe_intf)
-	    df.callback(0)
-        reactor.callLater(0,test_external_connectivity,df)
-        return df
-
-    @deferred(TIMEOUT)
-    def test_vsg_for_external_connectivity_after_restarting_vcpe_instance(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Get dhcp ip to vcpe interface
-        3. Add static route to destination route in test container
-        4. From test container ping to destination route and verify ping success
-        5. Login to compute node and execute command to pause vcpe container
-        6. From test container ping to destination route and verify ping success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-		clock = 0
-		status = False
-		while(clock <= 20):
-			time.sleep(5)
-                	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-			if st == False:
-				status = True
-				break
-			clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0, test_external_connectivity, df)
-        return df
-
-    @nottest #Setup getting distrubed if vSG VM restart
-    @deferred(TIMEOUT)
-    def test_vsg_for_external_connectivity_after_restarting_vsg_vm(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Get dhcp ip to vcpe interface
-        3. Add static route to destination route in test container
-        4. From test container ping to destination route and verify ping success
-        5. Login to compute node and execute command to pause vcpe container
-        6. From test container ping to destination route and verify ping success
-        """
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-		vsg.reboot()
-                clock = 0
-                status = False
-                while(clock <= 30):
-                        time.sleep(5)
-                        st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                        if st == False:
-                                status = True
-                                break
-                        clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                vsg.reboot()
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0, test_external_connectivity, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_for_external_connectivity_with_vcpe_container_paused(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-	2. Get dhcp ip to vcpe interface
-	3. Add static route to destination route in test container
-        4. From test container ping to destination route and verify ping success
-        5. Login to compute node and execute command to pause vcpe container
-        6. From test container ping to destination route and verify ping success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def test_external_connectivity(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker pause {}'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-		vsg.run_cmd('sudo docker unpause {}'.format(vcpe_name))
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-		raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0, test_external_connectivity, df)
-        return df
-
-    @deferred(30)
-    def test_vsg_firewall_with_deny_destination_ip_set(self, vcpe_name=None, vcpe_intf=None):
-	"""
-	Test Method:
-	1. Get vSG corresponding to vcpe
-	2. Login to compute node
-	3. Execute iptable command on vcpe from compute node to deny a destination IP
-	4. From cord-tester ping to the denied IP address
-	5. Verifying that ping should not be successful
-	"""
-	if not vcpe_name:
-		vcpe_name = self.container_vcpes_reserved[0]
-	if not vcpe_intf:
-		vcpe_intf = self.dhcp_vcpes_reserved[0]
-	df = defer.Deferred()
-	def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-            	st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-            	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-            	assert_equal(st, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-		self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		#vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-	reactor.callLater(0, vcpe_firewall, df)
-	return df
-
-    @deferred(60)
-    def test_vsg_firewall_with_rule_to_add_and_delete_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        4. From cord-tester ping to the denied IP address
-	5. Verifying that ping should not be successful
-	6. Delete the iptable rule in  vcpe
-	7. From cord-tester ping to the denied IP address
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-	def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-	    host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-	        self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-	        st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-	        st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-	        st,_ = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-		raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-	        self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		#vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_verifying_reachability_for_non_blocked_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        4. From cord-tester ping to the denied IP address
-	5. Verifying that ping should not be successful
-	6. From cord-tester ping to the denied IP address other than the denied one
-        7. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-	def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host1 = '8.8.8.8'
-            host2 = '204.79.197.203'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-        	assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-                self.del_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_appending_rules_with_deny_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP1
-        4. From cord-tester ping to the denied IP address IP1
-        5. Verifying that ping should not be successful
-	6. Execute iptable command on vcpe from compute node to deny a destination IP2
-        6. From cord-tester ping to the denied IP address IP2
-        7. Verifying that ping should not be successful
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-	def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host1 = '8.8.8.8'
-            host2 = '204.79.197.203'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, False)
-		st,_ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-		time.sleep(1)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, out = getstatusoutput('ping -c 1 {}'.format(host2))
-		log.info('host2 ping output is %s'%out)
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD 2 -d {} -j DROP'.format(vcpe_name,host2))
-		time.sleep(1)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host2))
-                self.del_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(TIMEOUT)
-    def test_vsg_firewall_removing_one_rule_denying_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP1
-        4. Execute iptable command on vcpe from compute node to deny a destination IP2
-        5. From cord-tester ping to the denied IP address IP1
-        6. Verifying that ping should not be successful
-        7. From cord-tester ping to the denied IP address IP2
-        8. Verifying that ping should not be successful
-        9. Execute iptable command on vcpe from compute node to remove deny a destination IP2 rule
-        10. From cord-tester ping to the denied IP address IP2
-        11. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-	def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host1 = '8.8.8.8'
-            host2 = '204.79.197.203'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-                self.add_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-	        st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host2))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host2))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,False)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host1))
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host2))
-                self.del_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-		#vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_changing_rule_id_deny_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-	"""
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        5. From cord-tester ping to the denied IP address IP1
-        6. Verifying that ping should not be successful
-        9. Execute iptable command on vcpe from compute node to change the rule ID to 2 to  deny the same  destination IP
-        10. From cord-tester ping to the denied IP address IP
-        11. Verifying that ping should not be successful
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD 2 -d {} -j DROP '.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(50)
-    def test_vsg_firewall_changing_deny_rule_to_accept_dest_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        5. From cord-tester ping to the denied IP address IP1
-        6. Verifying that ping should not be successful
-        9. Execute iptable command on vcpe from compute node to accept the same  destination IP
-        10. From cord-tester ping to the accepted IP
-        11. Verifying the ping should  success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-	    vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -R FORWARD 1 -d {} -j ACCEPT'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j ACCEPT'.format(vcpe_name,host))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_denying_destination_network(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP subnet
-        4. From cord-tester ping to the denied IP address IP1 in the denied subnet
-        5. Verifying that ping should not be successful
-        6. From cord-tester ping to the denied IP address IP2 in the denied subnet
-        7. Verifying that ping should not be successful
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            network = '204.79.197.192/28'
-            host1 = '204.79.197.203'
-            host2 = '204.79.197.210'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-	        self.add_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-		st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,network))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,network))
-                self.del_static_route_via_vcpe_interface([host1,host2],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_denying_destination_network_subnet_modification(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP subnet
-        4. From cord-tester ping to the denied IP address IP1 in the denied subnet
-        5. Verifying that ping should not be successful
-        6. From cord-tester ping to the denied IP address IP2 in the denied subnet
-        7. Verifying that ping should not be successful
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            network1 = '204.79.197.192/28'
-            network2 = '204.79.197.192/27'
-            host1 = '204.79.197.203'
-            host2 = '204.79.197.210'
-            host3 = '204.79.197.224'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host1,host2,host3],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,network1))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st,False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD 2 -d {} -j DROP'.format(vcpe_name,network2))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, True)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host3))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,network1))
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,network2))
-                self.del_static_route_via_vcpe_interface([host1,host2,host3],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_with_deny_source_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a source IP
-        4. From cord-tester ping to 8.8.8.8 from the denied IP
-        5. Verifying that ping should not be successful
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		source_ip = get_ip(vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_rule_with_add_and_delete_deny_source_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a source IP
-        4. From cord-tester ping to 8.8.8.8 from the denied IP
-        5. Verifying that ping should not be successful
-	6. Delete the iptable rule in vcpe
-	7. From cord-tester ping to 8.8.8.8 from the denied IP
-	8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            source_ip = get_ip(self.vcpe_dhcp)
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-	        self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		source_ip = get_ip(vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-	        st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -s {} -j DROP'.format(vcpe_name,source_ip))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_rule_with_deny_icmp_protocol_echo_requests_type(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny icmp echo-requests type protocol packets
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-	6. Delete the iptable rule
-	7. From cord-tester ping to 8.8.8.8
-	8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-		st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-	    except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_rule_with_deny_icmp_protocol_echo_reply_type(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny icmp echo-reply type protocol packets
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Delete the iptable rule
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format('8.8.8.8'))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_changing_deny_rule_to_accept_rule_with_icmp_protocol_echo_requests_type(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny icmp echo-requests type protocol packets
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Insert another rule to accept the icmp-echo requests protocol packets
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-       	 	assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD  -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -R FORWARD 1 -p icmp --icmp-type echo-request -j ACCEPT'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-request -j DROP'.format(vcpe_name))
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-request -j ACCEPT'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_changing_deny_rule_to_accept_rule_with_icmp_protocol_echo_reply_type(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny icmp echo-reply type protocol packets
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying the ping should not success
-        6. Insert another rule to accept the icmp-echo requests protocol packets
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-       	        assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD  -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -R FORWARD 1 -p icmp --icmp-type echo-reply -j ACCEPT'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-reply -j DROP'.format(vcpe_name))
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp --icmp-type echo-reply -j ACCEPT'.format(vcpe_name))
-		self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_for_deny_icmp_protocol(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny icmp protocol packets
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Delete the iptable rule
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p icmp -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		#vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_rule_deny_icmp_protocol_and_destination_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Execute iptable command on vcpe from compute node to deny icmp protocol packets
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-	9. Delete the rule added in step 3
-	10. From cord-tester ping to 8.8.8.8
-	11. Verifying that ping should not be successful
-	12. Delete the rule added in step 6
-	13. From cord-tester ping to 8.8.8.8
-	14. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p icmp -j DROP'.format(vcpe_name))
-		st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-		st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st,False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {}  -j DROP'.format(vcpe_name,host))
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {}  -j DROP'.format(vcpe_name,host))
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(100)
-    def test_vsg_firewall_flushing_all_configured_rules(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny a destination IP
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Execute iptable command on vcpe from compute node to deny icmp protocol packets
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        9. Flush all the iptable rules configuraed in vcpe
-        10. Delete the rule added in step 6
-        11. From cord-tester ping to 8.8.8.8
-        12. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-       	 	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p icmp -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -F FORWARD'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-		vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-		status = False
-		clock = 0
-		while(clock <= 30):
-		    time.sleep(5)
-                    st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-		    if st == False:
-			status = True
-			break
-		    clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p icmp -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_deny_all_ipv4_traffic(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all ipv4 Traffic
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -4 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -4 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -4 -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_replacing_deny_rule_to_accept_rule_ipv4_traffic(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all ipv4 Traffic
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Replace the deny rule added in step 3 with accept rule
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-       	 	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -4 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -R FORWARD 1 -4 -j ACCEPT'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -4 -j DROP'.format(vcpe_name))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_deny_all_traffic_coming_on_lan_interface_in_vcpe(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all the  traffic coming on lan interface inside vcpe container
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -i eth1 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -i eth1 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -i eth1 -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_deny_all_traffic_going_out_of_wan_interface_in_vcpe(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all the  traffic going out of wan interface inside vcpe container
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -o eth0 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -o eth0 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD  -o eth0 -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_firewall_deny_all_traffic_from_lan_to_wan_in_vcpe(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all the  traffic from lan to wan interface in vcpe
-        4. From cord-tester ping to 8.8.8.8
-        5. Verifying that ping should not be successful
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to 8.8.8.8
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-	    host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -i eth1 -o eth0 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -i eth1 -o eth0 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -i eth1 -o eth0 -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_deny_all_dns_traffic(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all dns Traffic
-        4. From cord-tester ping to www.google.com
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to www.google.com
-        8. Verifying the ping should success
-        """
-	mgmt = 'eth0'
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = 'google-public-dns-a.google.com'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -p udp --dport 53 -j DROP'.format(vcpe_name))
-		vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe_intf, mgmt = mgmt)
-        	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_not_equal(st, False)
-		VSGAccess.restore_interface_config(mgmt, vcpe=vcpe_intf)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p udp --dport 53 -j DROP'.format(vcpe_name))
-                vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe_intf, mgmt = mgmt)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-		VSGAccess.restore_interface_config(mgmt, vcpe=vcpe_intf)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -p udp --dport 53 -j DROP'.format(vcpe_name))
-		VSGAccess.restore_interface_config(mgmt,vcpe=vcpe_intf)
-                raise
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(60)
-    def test_vsg_firewall_deny_all_ipv4_traffic_vcpe_container_restart(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all dns Traffic
-        4. From cord-tester ping to www.google.com
-        5. Verifying that ping should not be successful
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to www.google.com
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            if self.on_pod is False:
-                df.callback(0)
-                return
-            host = '8.8.8.8'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -4 -j DROP'.format(vcpe_name))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-		clock = 0
-		status = False
-		while(clock <= 20 ):
-		    time.sleep(5)
-		    st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-		    if st == False:
-			status = True
-			break
-		    clock += 5
-                assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -4 -j DROP'.format(vcpe_name))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0, vcpe_firewall, df)
-        return df
-
-    @deferred(40)
-    def test_vsg_nat_dnat_modifying_destination_ip(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all dns Traffic
-        4. From cord-tester ping to www.google.com
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to www.google.com
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            host = '8.8.8.8'
-	    dst_ip = '123.123.123.123'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -t nat -A PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-            finally:
-
-                vsg.run_cmd('sudo docker exec {} iptables -t nat -D PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0,vcpe_firewall,df)
-        return df
-
-    @deferred(40)
-    def test_vsg_nat_dnat_modifying_destination_ip_and_delete(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all dns Traffic
-        4. From cord-tester ping to www.google.com
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to www.google.com
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            host = '8.8.8.8'
-            dst_ip = '123.123.123.123'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -t nat -A PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-		st, _ = vsg.run_cmd('sudo docker exec {} iptables -t nat -D PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-		vsg.run_cmd('sudo docker exec {} iptables -t nat -D PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                raise
-            finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0,vcpe_firewall,df)
-        return df
-
-    @deferred(50)
-    def test_vsg_dnat_change_modifying_destination_ip_address(self, vcpe_name=None, vcpe_intf=None):
-        """
-        Test Method:
-        1. Get vSG corresponding to vcpe
-        2. Login to compute node
-        3. Execute iptable command on vcpe from compute node to deny all dns Traffic
-        4. From cord-tester ping to www.google.com
-        5. Verifying the ping should not success
-        6. Delete the iptable  rule added
-        7. From cord-tester ping to www.google.com
-        8. Verifying the ping should success
-        """
-        if not vcpe_name:
-                vcpe_name = self.container_vcpes_reserved[0]
-        if not vcpe_intf:
-                vcpe_intf = self.dhcp_vcpes_reserved[0]
-        df = defer.Deferred()
-        def vcpe_firewall(df):
-            host = '8.8.8.8'
-            dst_ip = '123.123.123.123'
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -t nat -A PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,output = vsg.run_cmd('sudo docker exec {} iptables -t nat -R PREROUTING 1  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-            finally:
-                vsg.run_cmd('sudo docker exec {} iptables -t nat -D PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,dst_ip))
-                vsg.run_cmd('sudo docker exec {} iptables -t nat -D PREROUTING  -s 192.168.0.0/16 -i eth1 -j DNAT --to-destination {}'.format(vcpe_name,host))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                #vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-            df.callback(0)
-        reactor.callLater(0,vcpe_firewall,df)
-        return df
-
-    def vsg_xos_subscriber_create(self, index, subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return ''
-        if subscriber_info is None:
-            subscriber_info = self.cord_subscriber.subscriber_info[index]
-        if volt_subscriber_info is None:
-            volt_subscriber_info = self.cord_subscriber.volt_subscriber_info[index]
-        s_tag = int(volt_subscriber_info['voltTenant']['s_tag'])
-        c_tag = int(volt_subscriber_info['voltTenant']['c_tag'])
-        vcpe = 'vsg-{}-{}'.format(s_tag, c_tag)
-        subId = self.cord_subscriber.subscriberCreate(index, subscriber_info, volt_subscriber_info)
-        if subId:
-            #if the vsg instance was already instantiated, then reduce delay
-            if c_tag % self.SUBSCRIBERS_PER_S_TAG == 0:
-                delay = 350
-            else:
-                delay = 90
-            log.info('Delaying %d seconds for the VCPE to be provisioned' %(delay))
-            time.sleep(delay)
-            log.info('Testing for external connectivity to VCPE %s' %(vcpe))
-            self.vsg_for_external_connectivity(index)
-
-        return subId
-
-    def vsg_xos_subscriber_delete(self, index, subId = '', voltId = '', subscriber_info = None, volt_subscriber_info = None):
-        if self.on_pod is False:
-            return
-        self.cord_subscriber.subscriberDelete(index, subId = subId, voltId = voltId,
-                                              subscriber_info = subscriber_info,
-                                              volt_subscriber_info = volt_subscriber_info)
-
-    def vsg_xos_subscriber_id(self, index):
-        if self.on_pod is False:
-            return ''
-        return self.cord_subscriber.subscriberId(index)
-
-    def test_vsg_xos_subscriber_create_reserved(self):
-        if self.on_pod is False:
-            return
-        tags_reserved = [ (int(vcpe['s_tag']), int(vcpe['c_tag'])) for vcpe in self.vcpes_reserved ]
-        volt_tenants = self.restApiXos.ApiGet('VOLT_TENANT')
-        subscribers = self.restApiXos.ApiGet('VOLT_SUBSCRIBER')
-        reserved_tenants = filter(lambda tenant: (int(tenant['s_tag']), int(tenant['c_tag'])) in tags_reserved, volt_tenants)
-        reserved_config = []
-        for tenant in reserved_tenants:
-            for subscriber in subscribers:
-                volt_id = self.cord_subscriber.getVoltId(subscriber)
-                provider_id = self.cord_subscriber.getProviderInstance(tenant)
-                if int(volt_id) == int(provider_id):
-                    volt_subscriber_info = {}
-                    volt_subscriber_info['voltTenant'] = dict(s_tag = tenant['s_tag'],
-                                                              c_tag = tenant['c_tag'])
-                    volt_subscriber_info['volt_id'] = volt_id
-                    volt_subscriber_info['service_specific_id'] = subscriber['service_specific_id']
-                    reserved_config.append( (subscriber, volt_subscriber_info) )
-                    break
-            else:
-                log.info('Subscriber not found for tenant. s_tag: %s, c_tag: %s' %(\
-                                                                                   str(tenant['s_tag']),\
-                                                                                   str(tenant['c_tag'])))
-
-        for subscriber_info, volt_subscriber_info in reserved_config:
-            self.vsg_xos_subscriber_delete(0,
-                                           subId = str(subscriber_info['id']),
-                                           voltId = str(volt_subscriber_info['volt_id']),
-                                           subscriber_info = subscriber_info,
-                                           volt_subscriber_info = volt_subscriber_info)
-            subId = self.vsg_xos_subscriber_create(0,
-                                                   subscriber_info = subscriber_info,
-                                                   volt_subscriber_info = volt_subscriber_info)
-            log.info('Created reserved subscriber %s' %(subId))
-
-    def vsg_create(self, num_subscribers):
-        if self.on_pod is False:
-            return
-        num_subscribers = min(num_subscribers, len(self.cord_subscriber.subscriber_info))
-        for index in xrange(num_subscribers):
-            #check if the index exists
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId and subId != '0':
-                self.vsg_xos_subscriber_delete(index, subId = subId)
-            subId = self.vsg_xos_subscriber_create(index)
-            log.info('Created Subscriber %s' %(subId))
-
-    def test_vsg_xos_subscriber_create_all(self):
-        self.vsg_create(len(self.cord_subscriber.subscriber_info))
-
-    def vsg_delete(self, num_subscribers):
-        if self.on_pod is False:
-            return
-        num_subscribers = min(num_subscribers, len(self.cord_subscriber.subscriber_info))
-        for index in xrange(num_subscribers):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId and subId != '0':
-                self.vsg_xos_subscriber_delete(index, subId = subId)
-
-    def test_vsg_xos_subscriber_delete_all(self):
-        self.vsg_delete(len(self.cord_subscriber.subscriber_info))
-
-    def __vsg_xos_subscriber_create(self, index):
-        subId = self.vsg_xos_subscriber_create(index)
-        assert_not_equal(subId, '')
-        assert_not_equal(subId, '0')
-
-    def __vsg_xos_subscriber_delete(self, index):
-        self.vsg_xos_subscriber_delete(index)
-
-    def test_vsg_xos_subscriber_create(self):
-        self.__vsg_xos_subscriber_create(0)
-
-    def test_vsg_xos_subscriber_create_2(self):
-        self.__vsg_xos_subscriber_create(1)
-
-    def test_vsg_xos_subscriber_create_3(self):
-        self.__vsg_xos_subscriber_create(2)
-
-    def test_vsg_xos_subscriber_create_4(self):
-        self.__vsg_xos_subscriber_create(3)
-
-    def test_vsg_xos_subscriber_create_5(self):
-        self.__vsg_xos_subscriber_create(4)
-
-    def test_vsg_xos_subscriber_delete(self):
-        self.__vsg_xos_subscriber_delete(0)
-
-    def test_vsg_xos_subscriber_delete_2(self):
-        self.__vsg_xos_subscriber_delete(1)
-
-    def test_vsg_xos_subscriber_delete_3(self):
-        self.__vsg_xos_subscriber_delete(2)
-
-    def test_vsg_xos_subscriber_delete_4(self):
-        self.__vsg_xos_subscriber_delete(3)
-
-    def test_vsg_xos_subscriber_delete_5(self):
-        self.__vsg_xos_subscriber_delete(4)
-
-    def test_vsg_xos_subscriber_create_and_delete(self):
-        subId = self.vsg_xos_subscriber_create(0)
-        if subId and subId != '0':
-            self.vsg_xos_subscriber_delete(0, subId)
-
-    def test_vsg_xos_subscriber_2_create_and_delete(self):
-        subId = self.vsg_xos_subscriber_create(1)
-        if subId and subId != '0':
-            self.vsg_xos_subscriber_delete(1, subId)
-
-    def test_vsg_xos_subscriber_3_create_and_delete(self):
-        subId = self.vsg_xos_subscriber_create(2)
-        if subId and subId != '0':
-            self.vsg_xos_subscriber_delete(2, subId)
-
-    def test_vsg_xos_subscriber_4_create_and_delete(self):
-        subId = self.vsg_xos_subscriber_create(3)
-        if subId and subId != '0':
-            self.vsg_xos_subscriber_delete(3, subId)
-
-    def test_vsg_xos_subscriber_5_create_and_delete(self):
-        subId = self.vsg_xos_subscriber_create(4)
-        if subId and subId != '0':
-            self.vsg_xos_subscriber_delete(4, subId)
-
-    @deferred(400)
-    def test_vsg_xos_subscriber_external_connectivity_through_vcpe_instance(self, index=0):
-        df = defer.Deferred()
-        status = False
-        def test_xos_subscriber(df):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                log.info('Creating vcpe instance ')
-		subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            vcpe = self.dhcp_vcpes[index]
-            host = '8.8.8.8'
-            self.add_static_route_via_vcpe_interface([host],vcpe=vcpe)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, False)
-	    self.del_static_route_via_vcpe_interface([host],vcpe=vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    #pass
-    @deferred(50)
-    def test_vsg_xos_subscriber_external_connectivity_without_creating_vcpe_instance(self, index=0):
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId != '0':
-		log.info('deleting already existing vcpe instance ')
-		self.vsg_xos_subscriber_delete(index, subId)
-	    vcpe = self.dhcp_vcpes[index]
-	    host = '8.8.8.8'
-	    self.add_static_route_via_vcpe_interface([host],vcpe=vcpe)
-	    st, out = getstatusoutput('route -n')
-	    log.info('route -n outpu-1-1-1--1-1-1-1-1-1-1  is %s'%out)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-	    self.del_static_route_via_vcpe_interface([host],vcpe=vcpe)
-            assert_equal(st, True)
-	    df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-	return df
-
-    @deferred(400)
-    def test_vsg_xos_subscriber_external_connectivity_after_removing_vcpe_instance_from_xos(self,index=0,host = '8.8.8.8'):
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-	    subId = self.vsg_xos_subscriber_id(index)
-	    if subId == '0':
-        	subId = self.vsg_xos_subscriber_create(index)
-	    assert_not_equal(subId,'0')
-	    vcpe = self.dhcp_vcpes[index]
-            if subId and subId != '0':
-	        self.add_static_route_via_vcpe_interface([host],vcpe=vcpe)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                self.vsg_xos_subscriber_delete(index, subId)
-	        time.sleep(2)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-	        self.del_static_route_via_vcpe_interface([host],vcpe=vcpe)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(400)
-    def test_vsg_xos_subscriber_external_connectivity_after_restarting_vcpe_instance(self, index=0, host = '8.8.8.8'):
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-	    vcpe_intf = self.dhcp_vcpes[index]
-            self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, False)
-	    vcpe_name = 'vsg-{}-{}'.format(vcpe_intf.split('.')[1],vcpe_intf.split('.')[2])
-	    vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    st, _ = vsg.run_cmd('sudo docker restart {}'.format(vcpe_name))
-	    assert_equal(st, True)
-            time.sleep(5)
-            self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, False)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(400)
-    def test_vsg_xos_subscriber_external_connectivity_toggling_vcpe_instance(self, index=0, host = '8.8.8.8'):
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            vcpe_intf = self.dhcp_vcpes[index]
-            self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, False)
-            vcpe_name = 'vsg-{}-{}'.format(vcpe_intf.split('.')[1],vcpe_intf.split('.')[2])
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            st, _ = vsg.run_cmd('sudo docker stop {}'.format(vcpe_name))
-            assert_equal(st, True)
-            time.sleep(3)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, True)
-            st, _ = vsg.run_cmd('sudo docker start {}'.format(vcpe_name))
-            assert_equal(st, True)
-            time.sleep(5)
-            st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-            assert_equal(st, False)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    #getting list out of range error while creating vcpe of index 6
-    def test_vsg_create_xos_subscribers_in_different_vsg_vm(self, index1=4, index2=6):
-	indexes = list(index1,index2)
-	subids = []
-	for index in indexes:
-        	subId = self.vsg_xos_subscriber_id(index)
-        	if not subId:
-        		subId = self.vsg_xos_subscriber_create(index)
-		assert_not_equal(subId,'0')
-		subids.append(subId)
-	log.info('succesfully created two vcpe instances in two different vSG VMs')
-	self.vsg_xos_subscriber_delete(index1, subid[0])
-	self.vsg_xos_subscriber_delete(index2, subid[1])
-
-    #Unable to reach external network via vcpes created by XOS
-    @deferred(TIMEOUT+400)
-    def test_vsg_xos_multiple_subscribers_external_connectivity_if_one_vcpe_goes_down(self):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Kill first vcpe instance
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            host1 = '8.8.8.8'
-	    host2 = '4.2.2.2'
-            vcpe_intf1 = self.dhcp_vcpes[0]
-            vcpe_intf2 = self.dhcp_vcpes[1]
-            vcpe_name1 = 'vsg-{}-{}'.format(vcpe_intf1.split('.')[1],vcpe_intf1.split('.')[2])
-            vcpe_name2 = 'vsg-{}-{}'.format(vcpe_intf2.split('.')[1],vcpe_intf2.split('.')[2])
-            subId1 = self.vsg_xos_subscriber_id(0)
-            log.info('already existing subid of index 0 is %s'%subId1)
-            if subId1 == '0':
-		log.info('creating vcpe instance of index 0')
-                subId1 = self.vsg_xos_subscriber_create(0)
-	    assert_not_equal(subId1,'0')
-            subId2 = self.vsg_xos_subscriber_id(1)
-            log.info('already existing subid of index 1 is %s'%subId2)
-            if subId2 == '0':
-		log.info('creating vcpe instance of index 1')
-                subId2 = self.vsg_xos_subscriber_create(1)
-	    assert_not_equal(subId2,'0')
-	    vsg1 = VSGAccess.get_vcpe_vsg(vcpe_name1)
-	    vsg2 = VSGAccess.get_vcpe_vsg(vcpe_name2)
-	    try:
-		for intf in [vcpe_intf1,vcpe_intf2]:
-		    host = host1 if intf is vcpe_intf1 else host2
-		    self.add_static_route_via_vcpe_interface([host],vcpe=intf)
-                    st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-                    assert_equal(st, False)
-		    if intf is vcpe_intf2:
-	    		self.vsg_xos_subscriber_delete(1, subId2)
-            		st, _  = vsg2.run_cmd('sudo docker kill {}'.format(vcpe_name2))
-            		time.sleep(2)
-			self.add_static_route_via_vcpe_interface([host],vcpe=intf)
-                        st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                        assert_equal(st, False)
-                        st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                        assert_equal(st, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-            	self.vsg_xos_subscriber_delete(0, subId1)
-            	self.vsg_xos_subscriber_delete(1, subId2)
-		self.del_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-		self.del_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(TIMEOUT+400)
-    def test_vsg_xos_subscriber_external_connectivity_after_vcpe_is_removed_and_added_again(self,index=0):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Remove first vcpe instance
-        4.Verify external network cant be reachable form first vcpe interface
-	5.Add back the removed vcpe instance
-	6.Verify external connectivity through vcpe instances from cord-tester
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df,index=index):
-            host = '8.8.8.8'
-            subId = self.vsg_xos_subscriber_id(index)
-            log.info('already existing subid of index 0 is %s'%subId)
-            if subId == '0':
-                log.info('creating vcpe instance of index %s'%index)
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            vcpe_intf = self.dhcp_vcpes[0]
-            vcpe_name = 'vsg-{}-{}'.format(vcpe_intf.split('.')[1],vcpe_intf.split('.')[2])
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-	    try:
-        	self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-	        st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-		log.info('Deleting vcpe Instance of index %s'%index)
-		self.vsg_xos_subscriber_delete(0, subId)
-        	st, _ = vsg.run_cmd('sudo docker kill {}'.format(vcpe_name))
-		time.sleep(1)
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, True)
-		subId = self.vsg_xos_subscriber_create(index)
-        	self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-        	st,_ = getstatusoutput('ping -c 1 {}'.format(host))
-        	assert_equal(st, False)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-		self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		self.vsg_xos_subscriber_delete(0, subId)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(TIMEOUT+400)
-    def test_vsg_xos_multiple_subscribers_external_connectivity_if_one_vcpe_restarts(self):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Restart first vcpe instance
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            host1 = '8.8.8.8'
-	    host2 = '4.2.2.2'
-            subId1 = self.vsg_xos_subscriber_id(0)
-            log.info('already existing subid of index 0 is %s'%subId1)
-            if subId1 == '0':
-                log.info('creating vcpe instance of index 0')
-                subId1 = self.vsg_xos_subscriber_create(0)
-            assert_not_equal(subId1,'0')
-            subId2 = self.vsg_xos_subscriber_id(1)
-            log.info('already existing subid of index 1 is %s'%subId2)
-            if subId2 == '0':
-                log.info('creating vcpe instance of index 1')
-                subId2 = self.vsg_xos_subscriber_create(1)
-            vcpe_intf1 = self.dhcp_vcpes[0]
-            vcpe_intf2 = self.dhcp_vcpes[1]
-            vcpe_name1 = 'vsg-{}-{}'.format(vcpe_intf1.split('.')[1],vcpe_intf1.split('.')[2])
-            vcpe_name2 = 'vsg-{}-{}'.format(vcpe_intf2.split('.')[1],vcpe_intf2.split('.')[2])
-            vsg1 = VSGAccess.get_vcpe_vsg(vcpe_name1)
-            vsg2 = VSGAccess.get_vcpe_vsg(vcpe_name2)
-	    try:
-		#checking external connectivity from vcpe interface 1 before vcpe 2 restart
-		self.add_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-		#checking external connectivity from vcpe interface 2 before vcpe 2 restart
-                self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, False)
-        	st, _  = vsg2.run_cmd('sudo docker restart {}'.format(vcpe_name2))
-		time.sleep(10)
-		#checking external connectivity from vcpe interface 1 after vcpe 2 restart
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-		self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-		time = 0
-		status = False
-		while(time <= 100):
-		     time.sleep(10)
-		     st,_ = getstatusoutput('ping -c 1 {}'.format(hos2))
-		     if st is False:
-			status = True
-        		break
-		     time += 10
-		assert_equal(status, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-        	self.del_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-        	self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                self.vsg_xos_subscriber_delete(0, subId1)
-        	self.vsg_xos_subscriber_delete(1, subId2)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(500)
-    def test_vsg_xos_multiple_subscribers_external_connectivity_if_one_vcpe_is_paused(self):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Pause running first vcpe instance
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            host1 = '8.8.8.8'
-            host2 = '4.2.2.2'
-            subId1 = self.vsg_xos_subscriber_id(0)
-            log.info('already existing subid of index 0 is %s'%subId1)
-            if subId1 == '0':
-                log.info('creating vcpe instance of index 0')
-                subId1 = self.vsg_xos_subscriber_create(0)
-            assert_not_equal(subId1,'0')
-            subId2 = self.vsg_xos_subscriber_id(1)
-            log.info('already existing subid of index 1 is %s'%subId2)
-            if subId2 == '0':
-                log.info('creating vcpe instance of index 1')
-                subId2 = self.vsg_xos_subscriber_create(1)
-            vcpe_intf1 = self.dhcp_vcpes[0]
-            vcpe_intf2 = self.dhcp_vcpes[1]
-            vcpe_name1 = 'vsg-{}-{}'.format(vcpe_intf1.split('.')[1],vcpe_intf1.split('.')[2])
-            vcpe_name2 = 'vsg-{}-{}'.format(vcpe_intf2.split('.')[1],vcpe_intf2.split('.')[2])
-            vsg1 = VSGAccess.get_vcpe_vsg(vcpe_name1)
-            vsg2 = VSGAccess.get_vcpe_vsg(vcpe_name2)
-            try:
-                #checking external connectivity from vcpe interface 1 before vcpe 2 pause
-                self.add_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                #checking external connectivity from vcpe interface 2 before vcpe 2 pause
-                self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, False)
-                st, _  = vsg2.run_cmd('sudo docker pause {}'.format(vcpe_name2))
-                time.sleep(1)
-                #checking external connectivity from vcpe interface 1 after vcpe 2 pause
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                #checking external connectivity from vcpe interface 2 after vcpe 2 pause
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                log.info('In Finally block 3333333333333333')
-		st, _  = vsg2.run_cmd('sudo docker unpause {}'.format(vcpe_name2))
-                self.del_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-                self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                self.vsg_xos_subscriber_delete(0, subId1)
-                self.vsg_xos_subscriber_delete(1, subId2)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(500)
-    def test_vsg_xos_subscriber_external_connectivity_if_one_vcpe_stops(self):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Stop running first vcpe instance
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df):
-            host1 = '8.8.8.8'
-            host2 = '4.2.2.2'
-            subId1 = self.vsg_xos_subscriber_id(0)
-            log.info('already existing subid of index 0 is %s'%subId1)
-            if subId1 == '0':
-                log.info('creating vcpe instance of index 0')
-                subId1 = self.vsg_xos_subscriber_create(0)
-            assert_not_equal(subId1,'0')
-            subId2 = self.vsg_xos_subscriber_id(1)
-            log.info('already existing subid of index 1 is %s'%subId2)
-            if subId2 == '0':
-                log.info('creating vcpe instance of index 1')
-                subId2 = self.vsg_xos_subscriber_create(1)
-            vcpe_intf1 = self.dhcp_vcpes[0]
-            vcpe_intf2 = self.dhcp_vcpes[1]
-            vcpe_name1 = 'vsg-{}-{}'.format(vcpe_intf1.split('.')[1],vcpe_intf1.split('.')[2])
-            vcpe_name2 = 'vsg-{}-{}'.format(vcpe_intf2.split('.')[1],vcpe_intf2.split('.')[2])
-            vsg1 = VSGAccess.get_vcpe_vsg(vcpe_name1)
-            vsg2 = VSGAccess.get_vcpe_vsg(vcpe_name2)
-            try:
-                #checking external connectivity from vcpe interface 1 before vcpe 2 stop
-                self.add_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                #checking external connectivity from vcpe interface 2 before vcpe 2 stop
-                self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, False)
-                st, _  = vsg2.run_cmd('sudo docker stop {}'.format(vcpe_name2))
-                time.sleep(5)
-                #checking external connectivity from vcpe interface 1 after vcpe 2 stop
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host1))
-                assert_equal(st, False)
-                #checking external connectivity from vcpe interface 1 after vcpe 2 stop
-		self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                st,_ = getstatusoutput('ping -c 1 {}'.format(host2))
-                assert_equal(st, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-                raise
-            finally:
-                st, _  = vsg2.run_cmd('sudo docker start {}'.format(vcpe_name2))
-                time.sleep(10)
-                self.del_static_route_via_vcpe_interface([host1],vcpe=vcpe_intf1)
-                self.add_static_route_via_vcpe_interface([host2],vcpe=vcpe_intf2)
-                self.vsg_xos_subscriber_delete(0, subId1)
-                self.vsg_xos_subscriber_delete(1, subId2)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(420)
-    def test_vsg_xos_subscriber_external_connectivity_after_vsg_vm_is_stopped(self, index=0):
-        """
-        Test Method:
-        1.Create two vcpe instances in two different vsg vms using XOS
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Bring down first vSG vm
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df,index=index):
-            host = '8.8.8.8'
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                log.info('creating vcpe instance of index 0')
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            vcpe_intf = self.dhcp_vcpes[index] #'vcpe{}.{}.{}'.format(s_tag, c_tag)
-            vcpe_name = self.container_vcpes[index]
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-		log.info('Stopping vsg instance')
-		vsg.stop()
-		time.sleep(5)
-		self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-		assert_equal(st, True)
-	    except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-		vsg.start()
-		self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		self.vsg_xos_subscriber_delete(index, subId)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(420)
-    def test_vsg_xos_subscriber_external_connectivity_after_vsg_vm_is_restarted(self, index=0):
-        """
-        Test Method:
-        1.Create subscriber
-        2.Verify external connectivity through vcpe instances from cord-tester
-        3.Bring down first vSG vm
-        4.Verify external network cant be reachable form first vcpe interface
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df,index=index):
-            host = '8.8.8.8'
-            subId = self.vsg_xos_subscriber_id(index)
-            if subId == '0':
-                log.info('creating vcpe instance of index 0')
-                subId = self.vsg_xos_subscriber_create(index)
-            assert_not_equal(subId,'0')
-            vcpe_intf = self.dhcp_vcpes[index] #'vcpe{}.{}.{}'.format(s_tag, c_tag)
-            vcpe_name = self.container_vcpes[index]
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, False)
-                log.info('Restarting vsg instance')
-                vsg.reboot()
-                time.sleep(10)
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		time = 0
-		status = False
-		while(time <= 100):
-			time.sleep(10)
-                	st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-			if st is False:
-                		status = True
-				break
-			time += 10
-		assert_equal(status, True)
-            except Exception as error:
-                log.info('Got Unexpected error %s'%error)
-	  	raise
-	    finally:
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		self.vsg_xos_subscriber_delete(index, subId)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(780)
-    def test_vsg_xos_multiple_subscribers_external_connectivity_if_two_vsgs_stop_and_start(self, index1=4, index2=6):
-	"""
-	Test Method:
-	1.Create two vcpe instances in two different vsg vms using XOS
-	2.Verify external connectivity through vcpe instances from cord-tester
-	3.Bring down first vSG vm
-	4.Verify external network cant be reachable form first vcpe interface
-	5.Bring down second vSG vm also
-	6.Verify external network cant be reachable form first vcpe interface also
-	"""
-        df = defer.Deferred(df,index1=index1,index2=index2)
-        def test_xos_subscriber(df,index=index):
-            subId1 = self.vsg_xos_subscriber_create(index1)
-            subId2 = self.vsg_xos_subscriber_create(index2)
-            if subId1 == '0':
-                self.vsg_xos_subscriber_delete(index1, subId1)
-	    assert_not_equal(subId1, '0')
-            if subId2 == '0':
-                self.vsg_xos_subscriber_delete(index2, subId2)
-	    assert_not_equal(subId2, '0')
-	    for index in [index1,index2]:
-                vcpe_intf = self.dhcp_vcpes[index] #'vcpe{}.{}.{}'.format(s_tag, c_tag)
-                vcpe_name = self.container_vcpes[index]
-                vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-		try:
-                    self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                    st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                    assert_equal(st, False)
-		    log.info('Stopping vsg instance of index %s'%index)
-		    vsg.stop()
-		    time.sleep(5)
-                    self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                    st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                    assert_equal(st, True)
-		except Exception as error:
-		    log.info('Got Unexpected error %s'%error)
-		    raise
-		finally:
-		    vsg.start()
-		    self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    @deferred(420)
-    def test_vsg_xos_subscriber_external_connectivity_with_creating_firewall_rule(self,index=0):
-        """
-        Alog:
-        1.Cretae a vcpe instance using XOS
-        2.Get dhcp IP to vcpe interface in cord-tester
-        3.Verify external network can be reachable from cord-tester
-        4.Add an iptable rule to drop packets destined to external network in vcpe
-        5.Verify now external network cant be reachable
-        6.Delele the iptable in vcpe instance
-        7.Verify external network can be reachable from cord-tester
-        """
-        df = defer.Deferred()
-        def test_xos_subscriber(df,index=index):
-            log.info('cls.dhcp_vcpes is %s'%self.dhcp_vcpes)
-            host = '8.8.8.8'
-            subId = self.vsg_xos_subscriber_create(index)
-	    if subId == '0':
-		subId = self.vsg_xos_subscriber_create(index)
-	    assert_not_equal(subId, '0')
-            vcpe_intf = self.dhcp_vcpes[index] #'vcpe{}.{}.{}'.format(s_tag, c_tag)
-	    vcpe_name = self.container_vcpes[index]
-            vsg = VSGAccess.get_vcpe_vsg(vcpe_name)
-            try:
-                self.add_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                #ssert_equal(st, False)
-                st, _ = vsg.run_cmd('sudo docker exec {} iptables -I FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                st, _ = getstatusoutput('ping -c 1 {}'.format(host))
-                assert_equal(st, True)
-                st,_ = vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-		self.vsg_xos_subscriber_delete(index, subId)
-            except Exception as error:
-		log.info('Got Unexpected error %s'%error)
-		raise
-	    finally:
-                vsg.run_cmd('sudo docker exec {} iptables -D FORWARD -d {} -j DROP'.format(vcpe_name,host))
-                self.del_static_route_via_vcpe_interface([host],vcpe=vcpe_intf)
-		self.vsg_xos_subscriber_delete(index, subId)
-            df.callback(0)
-        reactor.callLater(0,test_xos_subscriber,df)
-        return df
-
-    def test_vsg_for_packet_received_with_invalid_ip_fields(self):
-	"""
-	Test Method:
-	1.Create a vSG VM in compute node
-	2.Create a vCPE container in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-	4.From subscriber, send a ping packet with invalid ip fields
-	5.Verify that vSG drops the packet
-	6.Verify ping fails
-	"""
-
-    def test_vsg_for_packet_received_with_invalid_mac_fields(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-        2.Create a vCPE container in vSG VM
-        3.Ensure vSG VM and vCPE container created properly
-        4.From subscriber, send a ping packet with invalid mac fields
-        5.Verify that vSG drops the packet
-        6.Verify ping fails
-        """
-
-    def test_vsg_for_vlan_id_mismatch_in_stag(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute Node
-	2.Create a vCPE container in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        4.Send a ping request to external valid IP from subscriber, with incorrect vlan id in  s-tag and valid c-tag
-        5.Verify that ping fails as the packet drops at VM entry
-        6.Repeat step 4 with correct s-tag
-	7.Verify that ping success
-        """
-
-    def test_vsg_for_vlan_id_mismatch_in_ctag(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-	2.Create a vCPE container in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        4.Send a ping request to external valid IP from subscriber, with valid s-tag and incorrect vlan id in c-tag
-        5.Verify that ping fails as the packet drops at vCPE container entry
-        6.Repeat step 4 with valid s-tag and c-tag
-        7.Verify that ping success
-        """
-
-    def test_vsg_for_matching_and_mismatching_vlan_id_in_stag(self):
-        """
-        Test Method:
-        1.Create two vSG VMs in compute node
-	2.Create a vCPE container in each vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        4.From subscriber one, send ping request with valid s and c tags
-        5.From subscriber two, send ping request with vlan id mismatch in s-tag and valid c tags
-        6.Verify that ping success for only subscriber one and fails for two.
-        """
-
-    def test_vsg_for_matching_and_mismatching_vlan_id_in_ctag(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-	2.Create two vCPE containers in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        4.From subscriber one, send ping request with valid s and c tags
-        5.From subscriber two, send ping request with valid s-tag and vlan id mismatch in c-tag
-        6.Verify that ping success for only subscriber one and fails for two
-        """
-
-    def test_vsg_for_out_of_range_vlanid_in_ctag(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-	2.Create a vCPE container in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        4.From subscriber, send ping request with valid stag and vlan id in c-tag is an out of range value ( like 0,4097 )
-        4.Verify that ping fails as the ping packets drops at vCPE container entry
-        """
-
-    def test_vsg_for_out_of_range_vlanid_in_stag(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-	2.Create a vCPE container in vSG VM
-	3.Ensure vSG VM and vCPE container created properly
-        2.From subscriber, send ping request with vlan id in s-tag is an out of range value ( like 0,4097 ), with valid c-tag
-        4.Verify that ping fails as the ping packets drops at vSG VM entry
-        """
-
-    def test_vsg_for_extracting_all_compute_stats_from_all_vcpe_containers(self):
-	"""
-	Test Method:
-	1.Create a vSG VM in compute node
-	2.Create 10 vCPE containers in VM
-	3.Ensure vSG VM and vCPE containers created properly
-	4.Login to all vCPE containers
-	4.Get all compute stats from all vCPE containers
-	5.Verify the stats # verification method need to add
-	"""
-
-    def test_vsg_for_extracting_dns_stats_from_all_vcpe_containers(self):
-        """
-        Test Method:
-        1.Create a vSG VM in compute node
-        2.Create 10 vCPE containers in VM
-        3.Ensure vSG VM and vCPE containers created properly
-	4.From  10 subscribers, send ping to valid and invalid dns hosts
-        5.Verify dns resolves and ping success for valid dns hosts
-	6.Verify ping fails for invalid dns hosts
-        7.Verify dns host name resolve flows in OvS
-	8.Login to all 10 vCPE containers
-	9.Extract all dns stats
-	10.Verify dns stats for queries sent, queries received for dns host resolve success and failed scenarios
-        """
-        pass
diff --git a/src/test/vsg/vsg_dataplane_test.robot b/src/test/vsg/vsg_dataplane_test.robot
deleted file mode 100644
index e3f5d22..0000000
--- a/src/test/vsg/vsg_dataplane_test.robot
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2018-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-Suite Setup       Setup
-Suite Teardown    Teardown
-Test Timeout      10 minutes
-Documentation     Validates external connectivity from Cord-Tester Container through VSG Subscriber
-Library           OperatingSystem
-Library           SSHLibrary
-Library           /opt/cord/test/cord-tester/src/test/cord-api/Framework/utils/utils.py
-Library           /opt/cord/test/cord-tester/src/test/cord-api/Framework/utils/onosUtils.py
-Library           /opt/cord/test/cord-tester/src/test/cord-api/Framework/utils/openstackUtils.py
-Resource          /opt/cord/test/cord-tester/src/test/cord-api/Framework/utils/utils.robot
-
-*** Variables ***
-${pod}              qct-pod1.yml
-${vsg_data_file}    /opt/cord/test/cord-tester/src/test/cord-api/Tests/data/Ch_Subscriber.json
-
-*** Test Cases ***
-Validate Instances are ACTIVE
-    [Documentation]    Validates that all instances are ACTIVE
-    Wait Until Keyword Succeeds    300s    5s    Instances ACTIVE
-
-Validate Connectivity to All VSGs via Mgmt Interface
-    [Documentation]    Validates that all given vsg instances are reachable through the mgmt interfaces
-    ##Loop through nova ids,  get mgmt ips + compute nodes, ssh into compute node, and validate ping to mgmt_ip
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${mgmt_ip}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep management | awk '{print $5}'
-    \    ${node}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep :host | awk '{print $4}'
-    \    ${ping_result}=    Run    ssh ubuntu@${node} ping -c 1 ${mgmt_ip}
-    \    Should Contain   ${ping_result}    64 bytes from ${mgmt_ip}
-    \    Should Not Contain    ${ping_result}    100% packet loss
-
-Validate VSG External Connectivity
-    [Documentation]    Validates that the given vsg instances have external connectivity
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${mgmt_ip}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep management | awk '{print $5}'
-    \    ${node}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep :host | awk '{print $4}'
-    \    Wait Until Keyword Succeeds    300s    5s    Validate Ext Connectivity    ${node}    ${mgmt_ip}
-
-Configure X-Connects for 3 Subscribers
-    [Documentation]    Configures the cross connect on the fabric switch with s-tags for the subscribers created via control-plane tests on the correct ports
-    [Tags]    xconnect    dataplane
-    ${netcfg_init}=    onosUtils.onos_command_execute    onos-fabric    8101    netcfg
-    Log    ${netcfg_init}
-    Run    http -a onos:rocks DELETE http://onos-fabric:8181/onos/v1/network/configuration/
-    Sleep    15
-    Run    http -a onos:rocks POST http://onos-fabric:8181/onos/v1/network/configuration/ < /opt/cord/test/cord-tester/src/test/setup/${netcfg_file}
-    Sleep    15
-    Run    http -a onos:rocks DELETE http://onos-fabric:8181/onos/v1/applications/org.onosproject.segmentrouting/active
-    Sleep    15
-    Run    http -a onos:rocks POST http://onos-fabric:8181/onos/v1/applications/org.onosproject.segmentrouting/active
-    Sleep    15
-    ${netcfg}=    onosUtils.onos_command_execute    onos-fabric    8101    netcfg
-    Log    ${netcfg}
-    Should Contain    ${netcfg}    vsg-1
-    Should Contain    ${netcfg}    vsg-2
-    Should Contain    ${netcfg}    vsg-3
-    Should Contain    ${netcfg}    "vlan" : ${s_tags[0]}
-    Should Contain    ${netcfg}    "vlan" : ${s_tags[1]}
-    Should Contain    ${netcfg}    "vlan" : ${s_tags[2]}
-
-Validate VSG External Connectivity Again
-    [Documentation]    Validates that the given vsg instances have external connectivity even after onos-fabric has been re-configured
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${mgmt_ip}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep management | awk '{print $5}'
-    \    ${node}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep :host | awk '{print $4}'
-    \    Wait Until Keyword Succeeds    300s    5s    Validate Ext Connectivity    ${node}    ${mgmt_ip}
-
-Validate VCPE Containers
-    [Documentation]    Validates that vcpes containers are up in each vsg instance
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${mgmt_ip}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep management | awk '{print $5}'
-    \    ${node}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep :host | awk '{print $4}'
-    \    Wait Until Keyword Succeeds    800s    5s    Validate VCPE Container is Up    ${node}    ${mgmt_ip}
-
-Get VSG Subscriber and Tags
-    [Documentation]    Retrieves compute node connected on leaf-1 and s/c tags for that particular subscriber
-    [Tags]    dataplane
-    ${cmd}=    Set Variable    cordvtn-nodes | grep 10.6.1
-    ${cnode}=    onosUtils.onos_command_execute    onos-cord    8102    ${cmd}
-    @{cnode_on_leaf_1}=    Split String    ${cnode}
-    ${novalist}=    Run    . /opt/cord_profile/admin-openrc.sh; nova list --all-tenants | awk '{print $2}' | grep '[a-z]'
-    Log    ${novalist}
-    @{nova_ids}=    Split To Lines    ${novalist}
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${node}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep :host | awk '{print $4}'
-    \    Run Keyword If    '${node}' == '${cnode_on_leaf_1[0]}'    Exit For Loop
-    ${mgmt_ip}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep management | awk '{print $5}'
-    ## Get s/c tags for vsg
-    ${result}=    Run    ssh -o ProxyCommand="ssh -W %h:%p -l ubuntu ${cnode_on_leaf_1[0]}" ubuntu@${mgmt_ip} "sudo docker ps|grep 'vsg\\|vcpe'" | awk '{print $10}'
-    @{tags}=    Split String    ${result}    -
-    ${s_tag}=    Set Variable    ${tags[1]}
-    ${c_tag}=    Set Variable    ${tags[2]}
-    Set Suite Variable    ${s_tag}
-    Set Suite Variable    ${c_tag}
-
-Execute Dataplane Test
-    [Documentation]    Configures interfaces on cord-tester container to connect to vsg instance and validates traffic
-    [Tags]    dataplane
-    ${i_num}=    Set Variable If
-    ...    '${s_tag}' == '${s_tags[0]}'    1
-    ...    '${s_tag}' == '${s_tags[1]}'    2
-    ...    '${s_tag}' == '${s_tags[2]}'    3
-    ${output}=    Run    docker exec cord-tester1 bash -c "sudo echo 'nameserver 192.168.0.1' > /etc/resolv.conf"
-    ${output}=    Run    docker exec cord-tester1 bash -c "sudo dhclient vcpe${i_num}.${s_tag}.${c_tag}"
-    Sleep    5
-    ${output}=    Run    docker exec cord-tester1 bash -c "sudo route add default gw 192.168.0.1 vcpe${i_num}.${s_tag}.${c_tag}"
-    ${output}=    Run    docker exec cord-tester1 bash -c "ping -c 3 -I vcpe${i_num}.${s_tag}.${c_tag} 8.8.8.8"
-    Log To Console    \n ${output}
-    Should Contain   ${output}    64 bytes from 8.8.8.8
-    Should Not Contain    ${output}    100% packet loss
-
-*** Keywords ***
-Setup
-    [Documentation]    Gets global vars for test suite
-    @{s_tags}=    Create List
-    @{c_tags}=    Create List
-    ${netcfg_file}=    Set Variable If
-    ...    '${pod}' == 'qct-pod1.yml'    qct_fabric_test_netcfg.json
-    ...    '${pod}' == 'flex-pod1.yml'    flex_fabric_test_netcfg.json
-    ...    '${pod}' == 'calix-pod1.yml'    calix_fabric_test_netcfg.json
-    Set Suite Variable    ${netcfg_file}
-    ${subscriberList} =    utils.jsonToList    ${vsg_data_file}    SubscriberInfo
-    Set Suite Variable    ${slist}    ${subscriberList}
-    ${voltTenantList} =    Get Variable Value    ${slist}
-    ${vsg_count}=    Get Length    ${slist}
-    Set Suite Variable    ${vsg_count}
-    : FOR    ${INDEX}    IN RANGE    0    ${vsg_count}
-    \    ${s_tag}=    Get From Dictionary    ${slist[${INDEX}]}    s_tag
-    \    ${c_tag}=    Get From Dictionary    ${slist[${INDEX}]}    c_tag
-    \    Append To List    ${s_tags}    ${s_tag}
-    \    Append To List    ${c_tags}    ${c_tag}
-    @{nova_ids}=    Wait Until Keyword Succeeds    120s    5s    Validate Number of VSGs    ${vsg_count}
-    Set Suite Variable    @{nova_ids}
-    Set Suite Variable    ${s_tags}
-    Set Suite Variable    ${c_tags}
-
-Teardown
-    ${cmd}=    Set Variable    log:display
-    ${onos_logs}=    onosUtils.onos_command_execute    onos-fabric    8101    ${cmd}
-    Log    ${onos_logs}
-
-Validate Number of VSGs
-    [Arguments]    ${count}
-    ${novalist}=    Run    . /opt/cord_profile/admin-openrc.sh; nova list --all-tenants | awk '{print $2}' | grep '[a-z]'
-    Log    ${novalist}
-    @{nova_ids}=    Split To Lines    ${novalist}
-    ${vsgCount}=    Get Length    ${nova_ids}
-    Should Be Equal    ${vsgCount}    ${count}
-    [Return]    @{nova_ids}
-
-Instances ACTIVE
-    : FOR    ${nova_id}    IN    @{nova_ids}
-    \    ${status}=    Run    . /opt/cord_profile/admin-openrc.sh; nova show ${nova_id} | grep status | awk '{print $4}'
-    \    Should Be Equal    ${status}    ACTIVE
-
-Validate Ext Connectivity
-    [Arguments]    ${compute_node}    ${vsg_ip}
-    ${ping_ext_result}=    Run    ssh -o ProxyCommand="ssh -W %h:%p -l ubuntu ${compute_node}" ubuntu@${vsg_ip} "ping -c 3 8.8.8.8"
-    Should Contain   ${ping_ext_result}    64 bytes from 8.8.8.8
-    Should Not Contain    ${ping_ext_result}    100% packet loss
-
-Validate VCPE Container is Up
-    [Arguments]    ${compute_node}    ${vsg_ip}
-    ${docker_containers}=    Run    ssh -o ProxyCommand="ssh -W %h:%p -l ubuntu ${compute_node}" ubuntu@${vsg_ip} sudo docker ps | wc -l
-    Should Not Contain   ${docker_containers}    0
diff --git a/src/test/xos/__init__.py b/src/test/xos/__init__.py
deleted file mode 100644
index f9668ba..0000000
--- a/src/test/xos/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os,sys
-import logging
-logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
-##add the python path to lookup the utils
-working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
-utils_dir = os.path.join(working_dir, '../utils')
-fsm_dir = os.path.join(working_dir, '../fsm')
-cli_dir = os.path.join(working_dir, '../cli')
-subscriber_dir = os.path.join(working_dir, '../subscriber')
-__path__.append(utils_dir)
-__path__.append(fsm_dir)
-__path__.append(cli_dir)
-__path__.append(subscriber_dir)
diff --git a/src/test/xos/xosTest.py b/src/test/xos/xosTest.py
deleted file mode 100644
index bb15b0b..0000000
--- a/src/test/xos/xosTest.py
+++ /dev/null
@@ -1,540 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#
-# Copyright 2016-present Ciena Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import unittest
-import subprocess
-from docker import Client
-from itertools import chain
-from nose.tools import *
-from CordContainer import *
-from CordTestUtils import log_test as log
-import threading
-import time
-import os
-import json
-import pexpect
-import urllib
-log.setLevel('INFO')
-
-flatten = lambda l: chain.from_iterable(l)
-
-class xos_exchange(unittest.TestCase):
-
-    dckr = Client()
-    test_path = os.path.dirname(os.path.realpath(__file__))
-    XOS_BASE_CONTAINER_IMAGE = 'xosproject/xos-base:latest'
-    XOS_BASE_CONTAINER_NAME = 'xos-base'
-    XOS_BASE_CONTAINER_PORTS = [8000]
-    XOS_SYN_OPENSTACK_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-openstack'
-    XOS_SYN_OPENSTACK_CONTAINER_NAME = 'xos-synchronizer'
-    XOS_SYN_OPENSTACK_CONTAINER_PORTS = [8000]
-    XOS_POSTGRESQL_CONTAINER_IMAGE = 'xosproject/xos-postgres'
-    XOS_POSTGRESQL_CONTAINER_NAME = 'xos-db-postgres'
-    XOS_POSTGRESQL_CONTAINER_PORTS = [5432]
-    XOS_SYNDICATE_MS_CONTAINER_IMAGE = 'xosproject/syndicate-ms'
-    XOS_SYNDICATE_MS_CONTAINER_NAME = 'xos-syndicate-ms'
-    XOS_SYNDICATE_MS_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vtr'
-    XOS_SYNCHRONIZER_VTR_CONTAINER_NAME = 'xos-synchronizer-vtr'
-    XOS_SYNCHRONIZER_VTR_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vsg'
-    XOS_SYNCHRONIZER_VSG_CONTAINER_NAME = 'xos-synchronizer-vsg'
-    XOS_SYNCHRONIZER_VSG_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-onos'
-    XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME = 'xos-synchronizer-onos'
-    XOS_SYNCHRONIZER_ONOS_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-fabric'
-    XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME = 'xos-synchronizer-fabric'
-    XOS_SYNCHRONIZER_FABRIC_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-vtn'
-    XOS_SYNCHRONIZER_VTN_CONTAINER_NAME = 'xos-synchronizer-vtn'
-    XOS_SYNCHRONIZER_VTN_CONTAINER_PORTS = [8080]
-    XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE = 'xosproject/xos-synchronizer-onboarding'
-    XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME = 'xos-synchronizer-onboarding'
-    XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_PORTS = [8080]
-    XOS_API_ERROR_STRING_MATCH_1 = 'The resource you\'re looking for doesn\'t exist'
-    XOS_API_ERROR_STRING_MATCH_2 = 'Application Error'
-    XOS_API_UTILS_POST_LOGIN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/login/'
-    #XOS_API_UTILS_GET_PORTFORWARDING = 'https://private-anon-873978896e-xos.apiary-mock.com/api/portforwarding/port'
-    XOS_API_UTILS_GET_PORT_FORWARDING = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/portforwarding/'
-    XOS_API_UTILS_GET_SLICES_PLUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/slicesplus/'
-    XOS_API_UTILS_GET_SYNCHRONIZER = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/synchronizer/'
-    XOS_API_UTILS_GET_ONBOARDING_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/onboarding/service/ready'
-    XOS_API_UTILS_POST_TOSCA_RECIPE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/tosca/run/'
-    XOS_API_UTILS_GET_SSH_KEYS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/utility/sshkeys/'
-    XOS_API_TENANT_GET_ALL_SUBSCRIBERS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
-    XOS_API_TENANT_GET_SUBSCRIBER_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
-    XOS_API_TENANT_DELETE_SUBSCRIBER = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/'
-    XOS_API_TENANT_GET_SUBSCRIBER_FEATURE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/'
-    XOS_API_TENANT_GET_READ_SUBSCRIBER_UPLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uplink_speed/'
-    XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_UPLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uplink_speed/'
-    XOS_API_TENANT_GET_READ_SUBSCRIBER_DOWNLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/downlink_speed/'
-    XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_DOWNLINK_SPEED = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/downlink_speed/'
-    XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_CDN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/cdn/'
-    XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_CDN = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/cdn/'
-    XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_UVERSE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uverse/'
-    XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_UVERSE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/uverse/'
-    XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/status/'
-    XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_STATUS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/subscriber/subscriber_id/features/status/'
-    XOS_API_TENANT_GET_ALL_TRUCKROLL = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
-    XOS_API_TENANT_POST_CREATE_TRUCKROLL = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
-    XOS_API_TENANT_GET_TRUCKROLL_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
-    XOS_API_TENANT_DELETE_TRUCKROLL_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/truckroll/truckroll_id/'
-    XOS_API_TENANT_GET_ALL_vOLT = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
-    XOS_API_TENANT_POST_CREATE_vOLT = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
-    XOS_API_TENANT_GET_vOLT_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/cord/volt/volt_id/'
-    XOS_API_TENANT_GET_ALL_ONOS_APPS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/tenant/onos/app/'
-    XOS_API_SERVICE_GET_ALL_EXAMPLE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/exampleservice/'
-    XOS_API_SERVICE_GET_ALL_ONOS_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/onos/'
-    XOS_API_SERVICE_GET_ALL_vSG = 'https://private-anon-873978896e-xos.apiary-mock.com/api/service/vsg/'
-    XOS_API_CORE_GET_ALL_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
-    XOS_API_CORE_POST_CREATE_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
-    XOS_API_CORE_GET_DEPLOYMENT_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
-    XOS_API_CORE_DELETE_DEPLOYMENTS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/deployments/id/'
-    XOS_API_CORE_GET_ALL_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavoryys/id/'
-    XOS_API_CORE_POST_CREATE_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
-    XOS_API_CORE_GET_FLAVOR_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
-    XOS_API_CORE_DELETE_FLAVORS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/flavors/id/'
-    XOS_API_CORE_GET_ALL_INSTANCES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/'
-    XOS_API_CORE_POST_CREATE_INSTANCES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/?no_hyperlinks=1'
-    XOS_API_CORE_GET_INSTANCE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/id/'
-    XOS_API_CORE_DELETE_INSTANCES= 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/instances/id/'
-    XOS_API_CORE_GET_ALL_NODES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/nodes/id/'
-    XOS_API_CORE_GET_ALL_SERVICES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
-    XOS_API_CORE_POST_CREATE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
-    XOS_API_CORE_GET_SERVICE_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
-    XOS_API_CORE_DELETE_SERVICE = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/services/id/'
-    XOS_API_CORE_GET_ALL_SITES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/sites/'
-    XOS_API_CORE_GET_SITES_DETAILS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/sites/id/'
-    XOS_API_CORE_GET_ALL_SLICES = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/slices/id/'
-    XOS_API_CORE_GET_ALL_USERS = 'https://private-anon-873978896e-xos.apiary-mock.com/api/core/users/id/'
-
-
-    def setUp(self):
-        ''' Activate the XOS containers'''
-        self.maxDiff = None ##for assert_equal compare outputs on failure
-
-    def tearDown(self):
-        '''Deactivate the xos containers'''
-        log.info('Tear down setup')
-        self.CURRENT_PORT_NUM = 4
-
-    def exists(self, name):
-        return '/{0}'.format(name) in list(flatten(n['Names'] for n in self.dckr.containers()))
-
-
-    def img_exists(self, image):
-        cnt = filter(lambda c: c['Image'] == image, self.dckr.containers())
-        return image in [ctn['RepoTags'][0] if ctn['RepoTags'] else '' for ctn in self.dckr.images()]
-
-    def xos_containers_check(self, name, image):
-           if self.exists(name) != True:
-              if name == self.XOS_BASE_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosBase = Xos_base(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYN_OPENSTACK_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSynchronizerOpenstack(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_POSTGRESQL_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosPostgresql = XosPostgresql(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNDICATE_MS_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSyndicateMs = XosSyndicateMs(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSyncVtr(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSyncVsg(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSyncOnos(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSyncFabric(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSyncVtn(prefix = Container.IMAGE_PREFIX, update = False)
-              if name == self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME:
-                 log.info('%s container is not running, hence build and run it, waiting until container is up' %name)
-                 xosSynOpenstack = XosSynchronizerOnboarding(prefix = Container.IMAGE_PREFIX, update = False)
-              if self.img_exists(image) != True:
-                 log.info('%s container image is not built on host' %name)
-                 assert_equal(False, True)
-              if self.exists(name) != True:
-                 log.info('%s container image is build on host' %name)
-                 assert_equal(False, True)
-
-    def container_status(self, image, name):
-        ''' This function is checking that container is up and running'''
-        self.xos_containers_check(name, image)
-        container_info = self.dckr.containers(filters ={'name':name, 'status':'running'})
-        log.info('Xos container info= %s' %container_info)
-
-        if not container_info:
-           ## forcely failing test case
-           log.info('%s container is not running, container info %s' %(name,container_info))
-           assert_equal(False, True)
-        else:
-           container_status = container_info[0]['Status']
-           log.info('Xos container status= %s' %container_status)
-           assert_equal(container_status.split(' ')[0], 'Up')
-           return container_info
-
-    def container_ping(self, image, name):
-        ''' This function is checking if container is reachable '''
-        container_info = self.container_status(image= image, name= name)
-        container_ip = container_info[0]['NetworkSettings']['Networks']['bridge']['IPAddress']
-        ping_status = os.system('ping {} -c 3'.format(container_ip))
-        if ping_status != 0:
-           log.info('%s container is not reachable, response %s = '%(name,ping_status))
-           assert_equal(ping_status, 0)
-        log.info('%s container is not reachable, response = %s'%(name,ping_status))
-        assert_equal(ping_status, 0)
-
-    def container_listening_ports_info(self, image, name, ports_list):
-        ''' This function is checking that container ports are as excpeted '''
-        container_public_ports = []
-        container_info = self.container_status(image= image, name= name)
-        container_ports = container_info[0]['Ports']
-        container_public_ports.append(container_ports[0]['PublicPort'])
-        log.info('%s container is listening on these ports = %s'%(name,container_ports))
-        log.info('%s container is listening on these public ports = %s'%(name,container_public_ports))
-        for n in range(0,len(ports_list)):
-            port = ports_list[n]
-            if port in container_public_ports:
-               assert_equal(True, True)
-            else:
-               log.info('%s container is not listening on %s port which is not expected' %(name,n))
-               assert_equal(False, True)
-
-    def container_stop_start(self):
-        ''' This function is checking if container is stopped and started running again'''
-
-    def validate_url_response_data(self, url):
-        ''' This function is checking url responce and cross check errors on it output '''
-        response = urllib.urlopen(url)
-        data = response.read()
-        log.info('This is PORT FORWARDING URL reponse data {}'.format(data))
-        if not data:
-           log.info('{} Url did not returned any output from opencloud setup'.format(url))
-           assert_equal(True, False)
-        if self.XOS_API_ERROR_STRING_MATCH_1 in data:
-           log.info('Not an expected output from url'.format(url))
-           assert_equal(True, False)
-        if self.XOS_API_ERROR_STRING_MATCH_2 in data:
-           log.info('Not an expected output from url'.format(url))
-           assert_equal(True, False)
-
-    @nottest
-    def test_xos_base_container_status(self):
-        self.container_status(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_base_container_ping(self):
-        self.container_ping(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_base_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_BASE_CONTAINER_IMAGE, name = self.XOS_BASE_CONTAINER_NAME,
-                                             ports_list = self.XOS_BASE_CONTAINER_PORTS)
-
-    def test_xos_sync_openstack_container_status(self):
-        self.container_status(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE, name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME)
-
-    def test_xos_sync_openstack_container_ping(self):
-        self.container_ping(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE, name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME)
-
-    def test_xos_sync_openstack_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYN_OPENSTACK_CONTAINER_IMAGE,
-                                            name = self.XOS_SYN_OPENSTACK_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYN_OPENSTACK_CONTAINER_PORTS)
-
-    def test_xos_postgresql_container_status(self):
-        self.container_status(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE, name = self.XOS_POSTGRESQL_CONTAINER_NAME)
-
-    def test_xos_postgresql_container_ping(self):
-        self.container_ping(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE, name = self.XOS_POSTGRESQL_CONTAINER_NAME)
-
-    def test_xos_postgresql_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_POSTGRESQL_CONTAINER_IMAGE,
-                                            name = self.XOS_POSTGRESQL_CONTAINER_NAME,
-                                            ports_list = self.XOS_POSTGRESQL_CONTAINER_PORTS)
-
-    def test_xos_syndicate_ms_container_status(self):
-        self.container_status(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE, name = self.XOS_SYNDICATE_MS_CONTAINER_NAME)
-
-    def test_xos_syndicate_ms_container_ping(self):
-        self.container_ping(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE, name = self.XOS_SYNDICATE_MS_CONTAINER_NAME)
-
-    def test_xos_syndicate_ms_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNDICATE_MS_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNDICATE_MS_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNDICATE_MS_CONTAINER_PORTS)
-
-    @nottest
-    def test_xos_sync_vtr_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_vtr_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME)
-
-    @nottest
-    def ztest_xos_sync_vtr_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VTR_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_VTR_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_VTR_CONTAINER_PORTS)
-
-    @nottest
-    def test_xos_sync_vsg_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_vsg_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_vsg_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VSG_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_VSG_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_VSG_CONTAINER_PORTS)
-    @nottest
-    def test_xos_sync_onos_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_onos_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_onos_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_ONOS_CONTAINER_PORTS)
-    @nottest
-    def test_xos_sync_fabric_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_fabric_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_fabric_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_FABRIC_CONTAINER_PORTS)
-    @nottest
-    def test_xos_sync_vtn_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_vtn_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME)
-
-    @nottest
-    def test_xos_sync_vtn_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_VTN_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_VTN_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_VTN_CONTAINER_PORTS)
-
-    def test_xos_sync_onboarding_container_status(self):
-        self.container_status(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE)
-
-    def test_xos_sync_onboarding_container_ping(self):
-        self.container_ping(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE, name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE)
-
-    def test_xos_sync_onboarding_container_listening_ports(self):
-        self.container_listening_ports_info(image = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_IMAGE,
-                                            name = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_NAME,
-                                            ports_list = self.XOS_SYNCHRONIZER_ONBOARDING_CONTAINER_PORTS)
-
-    def test_xos_api_post_login(self):
-        response = urllib.urlopen(self.XOS_API_UTILS_POST_LOGIN)
-        data = response.read()
-
-    def test_xos_api_get_utils_port_forwarding(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_PORT_FORWARDING)
-
-    def test_xos_api_get_utils_slices_plus(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SLICES_PLUS)
-
-    def test_xos_api_get_utils_synchronizer(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SYNCHRONIZER)
-
-    def test_xos_api_get_utils_onboarding_status(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_ONBOARDING_STATUS)
-
-    def test_xos_api_post_utils_tosca_recipe(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_POST_TOSCA_RECIPE)
-
-    def test_xos_api_get_utils_ssh_keys(self):
-        self.validate_url_response_data(url = self.XOS_API_UTILS_GET_SSH_KEYS)
-
-    def test_xos_api_get_tenant_all_subscribers(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_SUBSCRIBERS)
-
-    def test_xos_api_get_tenant_subscribers_details(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_SUBSCRIBER_DETAILS)
-
-    def test_xos_api_get_tenant_subscriber_delete(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_DELETE_SUBSCRIBER)
-
-    def test_xos_api_get_tenant_subscribers_feature_details(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_SUBSCRIBER_FEATURE_DETAILS)
-
-    def test_xos_api_get_tenant_read_subscribers_feature_uplink_speed(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_UPLINK_SPEED)
-
-    def test_xos_api_tenant_put_update_subscribers_feature_uplink_speed(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_UPLINK_SPEED)
-
-    def test_xos_api_get_tenant_read_subscribers_feature_downlink_speed(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_DOWNLINK_SPEED)
-
-    def test_xos_api_tenant_put_update_subscribers_feature_downlink_speed(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_DOWNLINK_SPEED)
-
-    def test_xos_api_get_tenant_read_subscribers_feature_cdn(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_CDN)
-
-    def test_xos_api_tenant_put_update_subscribers_feature_cdn(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_CDN)
-
-    def test_xos_api_get_tenant_read_subscribers_feature_uverse(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_UVERSE)
-
-    def test_xos_api_tenant_put_update_subscribers_feature_uverse(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_UVERSE)
-
-    def test_xos_api_get_tenant_read_subscribers_feature_status(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_READ_SUBSCRIBER_FEATURE_STATUS)
-
-    def test_xos_api_tenant_put_update_subscribers_feature_status(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_PUT_UPDATE_SUBSCRIBER_FEATURE_STATUS)
-
-    def test_xos_api_tenant_get_all_truckroll(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_TRUCKROLL)
-
-    def test_xos_api_tenant_post_create_truckroll(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_POST_CREATE_TRUCKROLL)
-
-    def test_xos_api_tenant_get_truckroll_details(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_TRUCKROLL_DETAILS)
-
-    def test_xos_api_tenant_delete_trucroll(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_DELETE_TRUCKROLL_DETAILS)
-
-    def test_xos_api_tenant_get_all_volt(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_vOLT)
-
-    def test_xos_api_tenant_post_create_vOLT(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_POST_CREATE_vOLT)
-
-    def test_xos_api_tenant_get_volt_details(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_vOLT_DETAILS)
-
-    def test_xos_api_tenant_get_all_onos_apps(self):
-        self.validate_url_response_data(url = self.XOS_API_TENANT_GET_ALL_ONOS_APPS)
-
-    def test_xos_api_service_get_all_example_service(self):
-        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_EXAMPLE_SERVICE)
-
-    def test_xos_api_service_get_all_onos_service(self):
-        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_ONOS_SERVICE)
-
-    def test_xos_api_service_get_all_vsg(self):
-        self.validate_url_response_data(url = self.XOS_API_SERVICE_GET_ALL_vSG)
-
-    def test_xos_api_core_get_all_deployments(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_DEPLOYMENTS)
-
-    def test_xos_api_core_post_create_deployments(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_DEPLOYMENTS)
-
-    def test_xos_api_core_get_deployment_details(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_DEPLOYMENT_DETAILS)
-
-    def test_xos_api_core_delete_deployment(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_DEPLOYMENTS)
-
-    def test_xos_api_core_get_all_flavors(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_FLAVORS)
-
-    def test_xos_api_core_post_create_flavors(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_FLAVORS)
-
-    def test_xos_api_core_get_flavor_details(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_FLAVOR_DETAILS)
-
-    def test_xos_api_core_delete_flavors(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_FLAVORS)
-
-    def test_xos_api_core_get_all_instances(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_INSTANCES)
-
-    def test_xos_api_core_post_create_instances(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_INSTANCES)
-
-    def test_xos_api_core_get_instance_details(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_INSTANCE_DETAILS)
-
-    def test_xos_api_core_delete_instance(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_INSTANCES)
-
-    def test_xos_api_core_get_all_nodes(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_NODES)
-
-    def test_xos_api_core_get_all_services(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SERVICES)
-
-    def test_xos_api_core_post_create_service(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_POST_CREATE_SERVICE)
-
-    def test_xos_api_core_get_service_details(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_SERVICE_DETAILS)
-
-    def test_xos_api_core_delete_service(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_DELETE_SERVICE)
-
-    def test_xos_api_core_get_all_sites(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SITES)
-
-    def test_xos_api_core_get_site_details(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_SITES_DETAILS)
-
-    def test_xos_api_core_get_all_slices(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_SLICES)
-
-    def test_xos_api_core_get_all_users(self):
-        self.validate_url_response_data(url = self.XOS_API_CORE_GET_ALL_USERS)